/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

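/* The fixup_* arrays list instruction indices that the test harness patches
 * with the fds of maps it creates before loading the program; programs the
 * verifier accepts are additionally test-run and their return value is
 * compared against .retval.
 */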
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
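		/* R0 = ((1 + 2) - 3 - 1) * 3 = -3 */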
		.retval = -3,
	},
	{
		"DIV32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD32 by 0, zero check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"MOD64 by 0, zero check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_2, 1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"DIV32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 2),
			BPF_MOV32_IMM(BPF_REG_2, 16),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 8,
	},
	{
		"DIV32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 by 0, zero check, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 by 0, zero check ok, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 42),
			BPF_MOV32_IMM(BPF_REG_1, 3),
			BPF_MOV32_IMM(BPF_REG_2, 5),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD32 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD32 by 0, zero check 2, cls",
		.insns = {
			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 by 0, zero check 1, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 2,
	},
	{
		"MOD64 by 0, zero check 2, cls",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_MOV32_IMM(BPF_REG_0, -1),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = -1,
	},
	/* Just make sure that JITs used udiv/umod as otherwise we get
	 * an exception from INT_MIN/-1 overflow similarly as with div
	 * by zero.
	 */
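	/* Interpreted as unsigned, INT_MIN / -1 is 0 and INT_MIN % -1 is
	 * INT_MIN again, which is what the retvals below rely on.
	 */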
	{
		"DIV32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"DIV64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"MOD32 overflow, check 1",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_1, -1),
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD32 overflow, check 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = INT_MIN,
	},
	{
		"MOD64 overflow, check 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, -1),
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"MOD64 overflow, check 2",
		.insns = {
			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"xor32 zero extend check",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_2, -1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
			BPF_MOV32_IMM(BPF_REG_0, 2),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
			BPF_MOV32_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"empty prog",
		.insns = {
		},
		.errstr = "unknown opcode 00",
		.result = REJECT,
	},
	{
		"only exit insn",
		.insns = {
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
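	/* BPF_LD_IMM64 occupies two instruction slots; the second slot must be
	 * zero except for the upper 32 bits of the immediate. The tests below
	 * jump into its middle, truncate it, or set its reserved fields.
	 */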
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 1,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
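	/* 32-bit ARSH (opcodes c4/cc) is not a valid eBPF instruction at this
	 * point, only the 64-bit variant is accepted.
	 */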
	{
		"arsh32 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode c4",
	},
	{
		"arsh32 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.errstr = "unknown opcode cc",
	},
	{
		"arsh64 on imm",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"arsh64 on reg",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_1, 5),
			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "not an exit",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unknown opcode 8d",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
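		/* insn 2 is the BPF_LD_MAP_FD patched with a real map fd */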
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 subtraction from stack pointer",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
833 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700834 "check valid spill/fill",
835 .insns = {
836 /* spill R1(ctx) into stack */
837 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700838 /* fill it back into R2 */
839 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700840 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100841 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
842 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700843 BPF_EXIT_INSN(),
844 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700845 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700846 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700847 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800848 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700849 },
850 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200851 "check valid spill/fill, skb mark",
852 .insns = {
853 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
854 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
856 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
857 offsetof(struct __sk_buff, mark)),
858 BPF_EXIT_INSN(),
859 },
860 .result = ACCEPT,
861 .result_unpriv = ACCEPT,
862 },
863 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700864 "check corrupted spill/fill",
865 .insns = {
866 /* spill R1(ctx) into stack */
867 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700868 /* mess up with R1 pointer on stack */
869 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700870 /* fill back into R0 should fail */
871 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700872 BPF_EXIT_INSN(),
873 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700874 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700875 .errstr = "corrupted spill",
876 .result = REJECT,
877 },
878 {
879 "invalid src register in STX",
880 .insns = {
881 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
882 BPF_EXIT_INSN(),
883 },
884 .errstr = "R15 is invalid",
885 .result = REJECT,
886 },
887 {
888 "invalid dst register in STX",
889 .insns = {
890 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
891 BPF_EXIT_INSN(),
892 },
893 .errstr = "R14 is invalid",
894 .result = REJECT,
895 },
896 {
897 "invalid dst register in ST",
898 .insns = {
899 BPF_ST_MEM(BPF_B, 14, -1, -1),
900 BPF_EXIT_INSN(),
901 },
902 .errstr = "R14 is invalid",
903 .result = REJECT,
904 },
905 {
906 "invalid src register in LDX",
907 .insns = {
908 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
909 BPF_EXIT_INSN(),
910 },
911 .errstr = "R12 is invalid",
912 .result = REJECT,
913 },
914 {
915 "invalid dst register in LDX",
916 .insns = {
917 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
918 BPF_EXIT_INSN(),
919 },
920 .errstr = "R11 is invalid",
921 .result = REJECT,
922 },
923 {
924 "junk insn",
925 .insns = {
926 BPF_RAW_INSN(0, 0, 0, 0, 0),
927 BPF_EXIT_INSN(),
928 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100929 .errstr = "unknown opcode 00",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700930 .result = REJECT,
931 },
932 {
933 "junk insn2",
934 .insns = {
935 BPF_RAW_INSN(1, 0, 0, 0, 0),
936 BPF_EXIT_INSN(),
937 },
938 .errstr = "BPF_LDX uses reserved fields",
939 .result = REJECT,
940 },
941 {
942 "junk insn3",
943 .insns = {
944 BPF_RAW_INSN(-1, 0, 0, 0, 0),
945 BPF_EXIT_INSN(),
946 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100947 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700948 .result = REJECT,
949 },
950 {
951 "junk insn4",
952 .insns = {
953 BPF_RAW_INSN(-1, -1, -1, -1, -1),
954 BPF_EXIT_INSN(),
955 },
Daniel Borkmann21ccaf22018-01-26 23:33:48 +0100956 .errstr = "unknown opcode ff",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700957 .result = REJECT,
958 },
959 {
960 "junk insn5",
961 .insns = {
962 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
963 BPF_EXIT_INSN(),
964 },
965 .errstr = "BPF_ALU uses reserved fields",
966 .result = REJECT,
967 },
968 {
969 "misaligned read from stack",
970 .insns = {
971 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
972 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
973 BPF_EXIT_INSN(),
974 },
Edward Creef65b1842017-08-07 15:27:12 +0100975 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700976 .result = REJECT,
977 },
978 {
979 "invalid map_fd for function call",
980 .insns = {
981 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
982 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
984 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200985 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
986 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700987 BPF_EXIT_INSN(),
988 },
989 .errstr = "fd 0 is not pointing to valid bpf_map",
990 .result = REJECT,
991 },
992 {
993 "don't check return value before access",
994 .insns = {
995 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
996 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
998 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200999 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1000 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001001 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1002 BPF_EXIT_INSN(),
1003 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001004 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001005 .errstr = "R0 invalid mem access 'map_value_or_null'",
1006 .result = REJECT,
1007 },
1008 {
1009 "access memory with incorrect alignment",
1010 .insns = {
1011 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1012 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1014 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001015 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1016 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001017 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1018 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1019 BPF_EXIT_INSN(),
1020 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001021 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01001022 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001023 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001024 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001025 },
1026 {
1027 "sometimes access memory with incorrect alignment",
1028 .insns = {
1029 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1032 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001033 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1034 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001035 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1037 BPF_EXIT_INSN(),
1038 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1039 BPF_EXIT_INSN(),
1040 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001041 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001042 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001043 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001044 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001045 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001046 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001047 {
1048 "jump test 1",
1049 .insns = {
1050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1051 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1052 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1053 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1054 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1055 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1057 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1059 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1060 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1061 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1063 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1064 BPF_MOV64_IMM(BPF_REG_0, 0),
1065 BPF_EXIT_INSN(),
1066 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001067 .errstr_unpriv = "R1 pointer comparison",
1068 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001069 .result = ACCEPT,
1070 },
1071 {
1072 "jump test 2",
1073 .insns = {
1074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1075 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1076 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1077 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1078 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1079 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1080 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1082 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1083 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1084 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1085 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1086 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1088 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1089 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1090 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1091 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1092 BPF_MOV64_IMM(BPF_REG_0, 0),
1093 BPF_EXIT_INSN(),
1094 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001095 .errstr_unpriv = "R1 pointer comparison",
1096 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001097 .result = ACCEPT,
1098 },
1099 {
1100 "jump test 3",
1101 .insns = {
1102 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1104 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1106 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1107 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1108 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1110 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1111 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1112 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1114 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1116 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1118 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1120 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1122 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1123 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1124 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1126 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1128 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001129 BPF_EXIT_INSN(),
1130 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001131 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001132 .errstr_unpriv = "R1 pointer comparison",
1133 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001134 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001135 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001136 },
1137 {
1138 "jump test 4",
1139 .insns = {
1140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1141 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1143 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1145 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1146 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1148 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1149 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1150 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1160 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1161 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1164 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1168 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1169 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1172 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1178 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1180 BPF_MOV64_IMM(BPF_REG_0, 0),
1181 BPF_EXIT_INSN(),
1182 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001183 .errstr_unpriv = "R1 pointer comparison",
1184 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -07001185 .result = ACCEPT,
1186 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001187 {
1188 "jump test 5",
1189 .insns = {
1190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1191 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1192 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1193 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1194 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1195 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1196 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1197 BPF_MOV64_IMM(BPF_REG_0, 0),
1198 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1199 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1200 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1201 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1202 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1203 BPF_MOV64_IMM(BPF_REG_0, 0),
1204 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1205 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1206 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1207 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1208 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1209 BPF_MOV64_IMM(BPF_REG_0, 0),
1210 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1211 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1212 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1213 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1214 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1215 BPF_MOV64_IMM(BPF_REG_0, 0),
1216 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1217 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1218 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1219 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1220 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1221 BPF_MOV64_IMM(BPF_REG_0, 0),
1222 BPF_EXIT_INSN(),
1223 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001224 .errstr_unpriv = "R1 pointer comparison",
1225 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -07001226 .result = ACCEPT,
1227 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001228 {
1229 "access skb fields ok",
1230 .insns = {
1231 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1232 offsetof(struct __sk_buff, len)),
1233 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1234 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1235 offsetof(struct __sk_buff, mark)),
1236 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1237 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1238 offsetof(struct __sk_buff, pkt_type)),
1239 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1240 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1241 offsetof(struct __sk_buff, queue_mapping)),
1242 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -07001243 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1244 offsetof(struct __sk_buff, protocol)),
1245 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1246 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1247 offsetof(struct __sk_buff, vlan_present)),
1248 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1249 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1250 offsetof(struct __sk_buff, vlan_tci)),
1251 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +02001252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1253 offsetof(struct __sk_buff, napi_id)),
1254 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001255 BPF_EXIT_INSN(),
1256 },
1257 .result = ACCEPT,
1258 },
1259 {
1260 "access skb fields bad1",
1261 .insns = {
1262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1263 BPF_EXIT_INSN(),
1264 },
1265 .errstr = "invalid bpf_context access",
1266 .result = REJECT,
1267 },
1268 {
1269 "access skb fields bad2",
1270 .insns = {
1271 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1272 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1275 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1277 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001278 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1279 BPF_EXIT_INSN(),
1280 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1281 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1282 offsetof(struct __sk_buff, pkt_type)),
1283 BPF_EXIT_INSN(),
1284 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001285 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001286 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001287 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001288 .result = REJECT,
1289 },
1290 {
1291 "access skb fields bad3",
1292 .insns = {
1293 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1294 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1295 offsetof(struct __sk_buff, pkt_type)),
1296 BPF_EXIT_INSN(),
1297 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1298 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1300 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1302 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001303 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1304 BPF_EXIT_INSN(),
1305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1306 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1307 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001308 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001309 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001310 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -07001311 .result = REJECT,
1312 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001313 {
1314 "access skb fields bad4",
1315 .insns = {
1316 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1317 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1318 offsetof(struct __sk_buff, len)),
1319 BPF_MOV64_IMM(BPF_REG_0, 0),
1320 BPF_EXIT_INSN(),
1321 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1322 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1324 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001325 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1326 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001327 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1328 BPF_EXIT_INSN(),
1329 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1330 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1331 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001332 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001333 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001334 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001335 .result = REJECT,
1336 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001337 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001338 "invalid access __sk_buff family",
1339 .insns = {
1340 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1341 offsetof(struct __sk_buff, family)),
1342 BPF_EXIT_INSN(),
1343 },
1344 .errstr = "invalid bpf_context access",
1345 .result = REJECT,
1346 },
1347 {
1348 "invalid access __sk_buff remote_ip4",
1349 .insns = {
1350 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1351 offsetof(struct __sk_buff, remote_ip4)),
1352 BPF_EXIT_INSN(),
1353 },
1354 .errstr = "invalid bpf_context access",
1355 .result = REJECT,
1356 },
1357 {
1358 "invalid access __sk_buff local_ip4",
1359 .insns = {
1360 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1361 offsetof(struct __sk_buff, local_ip4)),
1362 BPF_EXIT_INSN(),
1363 },
1364 .errstr = "invalid bpf_context access",
1365 .result = REJECT,
1366 },
1367 {
1368 "invalid access __sk_buff remote_ip6",
1369 .insns = {
1370 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1371 offsetof(struct __sk_buff, remote_ip6)),
1372 BPF_EXIT_INSN(),
1373 },
1374 .errstr = "invalid bpf_context access",
1375 .result = REJECT,
1376 },
1377 {
1378 "invalid access __sk_buff local_ip6",
1379 .insns = {
1380 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1381 offsetof(struct __sk_buff, local_ip6)),
1382 BPF_EXIT_INSN(),
1383 },
1384 .errstr = "invalid bpf_context access",
1385 .result = REJECT,
1386 },
1387 {
1388 "invalid access __sk_buff remote_port",
1389 .insns = {
1390 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1391 offsetof(struct __sk_buff, remote_port)),
1392 BPF_EXIT_INSN(),
1393 },
1394 .errstr = "invalid bpf_context access",
1395 .result = REJECT,
1396 },
1397 {
1398 "invalid access __sk_buff remote_port",
1399 .insns = {
1400 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1401 offsetof(struct __sk_buff, local_port)),
1402 BPF_EXIT_INSN(),
1403 },
1404 .errstr = "invalid bpf_context access",
1405 .result = REJECT,
1406 },
1407 {
1408 "valid access __sk_buff family",
1409 .insns = {
1410 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1411 offsetof(struct __sk_buff, family)),
1412 BPF_EXIT_INSN(),
1413 },
1414 .result = ACCEPT,
1415 .prog_type = BPF_PROG_TYPE_SK_SKB,
1416 },
1417 {
1418 "valid access __sk_buff remote_ip4",
1419 .insns = {
1420 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1421 offsetof(struct __sk_buff, remote_ip4)),
1422 BPF_EXIT_INSN(),
1423 },
1424 .result = ACCEPT,
1425 .prog_type = BPF_PROG_TYPE_SK_SKB,
1426 },
1427 {
1428 "valid access __sk_buff local_ip4",
1429 .insns = {
1430 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1431 offsetof(struct __sk_buff, local_ip4)),
1432 BPF_EXIT_INSN(),
1433 },
1434 .result = ACCEPT,
1435 .prog_type = BPF_PROG_TYPE_SK_SKB,
1436 },
1437 {
1438 "valid access __sk_buff remote_ip6",
1439 .insns = {
1440 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1441 offsetof(struct __sk_buff, remote_ip6[0])),
1442 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1443 offsetof(struct __sk_buff, remote_ip6[1])),
1444 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, remote_ip6[2])),
1446 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1447 offsetof(struct __sk_buff, remote_ip6[3])),
1448 BPF_EXIT_INSN(),
1449 },
1450 .result = ACCEPT,
1451 .prog_type = BPF_PROG_TYPE_SK_SKB,
1452 },
1453 {
1454 "valid access __sk_buff local_ip6",
1455 .insns = {
1456 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1457 offsetof(struct __sk_buff, local_ip6[0])),
1458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1459 offsetof(struct __sk_buff, local_ip6[1])),
1460 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1461 offsetof(struct __sk_buff, local_ip6[2])),
1462 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1463 offsetof(struct __sk_buff, local_ip6[3])),
1464 BPF_EXIT_INSN(),
1465 },
1466 .result = ACCEPT,
1467 .prog_type = BPF_PROG_TYPE_SK_SKB,
1468 },
1469 {
1470 "valid access __sk_buff remote_port",
1471 .insns = {
1472 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1473 offsetof(struct __sk_buff, remote_port)),
1474 BPF_EXIT_INSN(),
1475 },
1476 .result = ACCEPT,
1477 .prog_type = BPF_PROG_TYPE_SK_SKB,
1478 },
1479 {
1480 "valid access __sk_buff remote_port",
1481 .insns = {
1482 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1483 offsetof(struct __sk_buff, local_port)),
1484 BPF_EXIT_INSN(),
1485 },
1486 .result = ACCEPT,
1487 .prog_type = BPF_PROG_TYPE_SK_SKB,
1488 },
1489 {
John Fastabended850542017-08-28 07:11:24 -07001490 "invalid access of tc_classid for SK_SKB",
1491 .insns = {
1492 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1493 offsetof(struct __sk_buff, tc_classid)),
1494 BPF_EXIT_INSN(),
1495 },
1496 .result = REJECT,
1497 .prog_type = BPF_PROG_TYPE_SK_SKB,
1498 .errstr = "invalid bpf_context access",
1499 },
1500 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001501 "invalid access of skb->mark for SK_SKB",
1502 .insns = {
1503 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 offsetof(struct __sk_buff, mark)),
1505 BPF_EXIT_INSN(),
1506 },
1507 .result = REJECT,
1508 .prog_type = BPF_PROG_TYPE_SK_SKB,
1509 .errstr = "invalid bpf_context access",
1510 },
1511 {
1512 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001513 .insns = {
1514 BPF_MOV64_IMM(BPF_REG_0, 0),
1515 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1516 offsetof(struct __sk_buff, mark)),
1517 BPF_EXIT_INSN(),
1518 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001519 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001520 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001521 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001522 },
1523 {
1524 "check skb->tc_index is writeable by SK_SKB",
1525 .insns = {
1526 BPF_MOV64_IMM(BPF_REG_0, 0),
1527 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1528 offsetof(struct __sk_buff, tc_index)),
1529 BPF_EXIT_INSN(),
1530 },
1531 .result = ACCEPT,
1532 .prog_type = BPF_PROG_TYPE_SK_SKB,
1533 },
1534 {
1535 "check skb->priority is writeable by SK_SKB",
1536 .insns = {
1537 BPF_MOV64_IMM(BPF_REG_0, 0),
1538 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1539 offsetof(struct __sk_buff, priority)),
1540 BPF_EXIT_INSN(),
1541 },
1542 .result = ACCEPT,
1543 .prog_type = BPF_PROG_TYPE_SK_SKB,
1544 },
1545 {
1546 "direct packet read for SK_SKB",
1547 .insns = {
1548 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1549 offsetof(struct __sk_buff, data)),
1550 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1551 offsetof(struct __sk_buff, data_end)),
1552 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1554 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1555 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1556 BPF_MOV64_IMM(BPF_REG_0, 0),
1557 BPF_EXIT_INSN(),
1558 },
1559 .result = ACCEPT,
1560 .prog_type = BPF_PROG_TYPE_SK_SKB,
1561 },
1562 {
1563 "direct packet write for SK_SKB",
1564 .insns = {
1565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1566 offsetof(struct __sk_buff, data)),
1567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1568 offsetof(struct __sk_buff, data_end)),
1569 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1571 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1572 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1573 BPF_MOV64_IMM(BPF_REG_0, 0),
1574 BPF_EXIT_INSN(),
1575 },
1576 .result = ACCEPT,
1577 .prog_type = BPF_PROG_TYPE_SK_SKB,
1578 },
1579 {
1580 "overlapping checks for direct packet access SK_SKB",
1581 .insns = {
1582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1583 offsetof(struct __sk_buff, data)),
1584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1585 offsetof(struct __sk_buff, data_end)),
1586 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1588 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1591 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1592 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1593 BPF_MOV64_IMM(BPF_REG_0, 0),
1594 BPF_EXIT_INSN(),
1595 },
1596 .result = ACCEPT,
1597 .prog_type = BPF_PROG_TYPE_SK_SKB,
1598 },
1599 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001600 "check skb->mark is not writeable by sockets",
1601 .insns = {
1602 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1603 offsetof(struct __sk_buff, mark)),
1604 BPF_EXIT_INSN(),
1605 },
1606 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001607 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001608 .result = REJECT,
1609 },
1610 {
1611 "check skb->tc_index is not writeable by sockets",
1612 .insns = {
1613 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1614 offsetof(struct __sk_buff, tc_index)),
1615 BPF_EXIT_INSN(),
1616 },
1617 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001618 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001619 .result = REJECT,
1620 },
1621 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001622 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001623 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001624 BPF_MOV64_IMM(BPF_REG_0, 0),
1625 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1626 offsetof(struct __sk_buff, cb[0])),
1627 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1628 offsetof(struct __sk_buff, cb[0]) + 1),
1629 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1630 offsetof(struct __sk_buff, cb[0]) + 2),
1631 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1632 offsetof(struct __sk_buff, cb[0]) + 3),
1633 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1634 offsetof(struct __sk_buff, cb[1])),
1635 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1636 offsetof(struct __sk_buff, cb[1]) + 1),
1637 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1638 offsetof(struct __sk_buff, cb[1]) + 2),
1639 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1640 offsetof(struct __sk_buff, cb[1]) + 3),
1641 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1642 offsetof(struct __sk_buff, cb[2])),
1643 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1644 offsetof(struct __sk_buff, cb[2]) + 1),
1645 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1646 offsetof(struct __sk_buff, cb[2]) + 2),
1647 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1648 offsetof(struct __sk_buff, cb[2]) + 3),
1649 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1650 offsetof(struct __sk_buff, cb[3])),
1651 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1652 offsetof(struct __sk_buff, cb[3]) + 1),
1653 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, cb[3]) + 2),
1655 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1656 offsetof(struct __sk_buff, cb[3]) + 3),
1657 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, cb[4])),
1659 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[4]) + 1),
1661 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1662 offsetof(struct __sk_buff, cb[4]) + 2),
1663 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1664 offsetof(struct __sk_buff, cb[4]) + 3),
1665 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1666 offsetof(struct __sk_buff, cb[0])),
1667 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1668 offsetof(struct __sk_buff, cb[0]) + 1),
1669 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1670 offsetof(struct __sk_buff, cb[0]) + 2),
1671 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1672 offsetof(struct __sk_buff, cb[0]) + 3),
1673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1674 offsetof(struct __sk_buff, cb[1])),
1675 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1676 offsetof(struct __sk_buff, cb[1]) + 1),
1677 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1678 offsetof(struct __sk_buff, cb[1]) + 2),
1679 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1680 offsetof(struct __sk_buff, cb[1]) + 3),
1681 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1682 offsetof(struct __sk_buff, cb[2])),
1683 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[2]) + 1),
1685 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1686 offsetof(struct __sk_buff, cb[2]) + 2),
1687 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1688 offsetof(struct __sk_buff, cb[2]) + 3),
1689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1690 offsetof(struct __sk_buff, cb[3])),
1691 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1692 offsetof(struct __sk_buff, cb[3]) + 1),
1693 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1694 offsetof(struct __sk_buff, cb[3]) + 2),
1695 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1696 offsetof(struct __sk_buff, cb[3]) + 3),
1697 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1698 offsetof(struct __sk_buff, cb[4])),
1699 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1700 offsetof(struct __sk_buff, cb[4]) + 1),
1701 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1702 offsetof(struct __sk_buff, cb[4]) + 2),
1703 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1704 offsetof(struct __sk_buff, cb[4]) + 3),
1705 BPF_EXIT_INSN(),
1706 },
1707 .result = ACCEPT,
1708 },
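	/*
	 * Narrow (byte/half) __sk_buff accesses: stores into skb->hash are
	 * never allowed, and for these default (socket filter) programs
	 * narrow stores into skb->tc_index are rejected as well.  Narrow
	 * loads of skb->hash are only permitted when they cover the
	 * field's low-order bytes; the #if __BYTE_ORDER blocks pick the
	 * offset of those bytes for each endianness.
	 */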
1709 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001710 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001711 .insns = {
1712 BPF_MOV64_IMM(BPF_REG_0, 0),
1713 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001714 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001715 BPF_EXIT_INSN(),
1716 },
1717 .errstr = "invalid bpf_context access",
1718 .result = REJECT,
1719 },
1720 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001721 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001722 .insns = {
1723 BPF_MOV64_IMM(BPF_REG_0, 0),
1724 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001725 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001726 BPF_EXIT_INSN(),
1727 },
1728 .errstr = "invalid bpf_context access",
1729 .result = REJECT,
1730 },
1731 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001732 "check skb->hash byte load permitted",
1733 .insns = {
1734 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001735#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001736 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1737 offsetof(struct __sk_buff, hash)),
1738#else
1739 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1740 offsetof(struct __sk_buff, hash) + 3),
1741#endif
1742 BPF_EXIT_INSN(),
1743 },
1744 .result = ACCEPT,
1745 },
1746 {
1747 "check skb->hash byte load not permitted 1",
1748 .insns = {
1749 BPF_MOV64_IMM(BPF_REG_0, 0),
1750 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1751 offsetof(struct __sk_buff, hash) + 1),
1752 BPF_EXIT_INSN(),
1753 },
1754 .errstr = "invalid bpf_context access",
1755 .result = REJECT,
1756 },
1757 {
1758 "check skb->hash byte load not permitted 2",
1759 .insns = {
1760 BPF_MOV64_IMM(BPF_REG_0, 0),
1761 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1762 offsetof(struct __sk_buff, hash) + 2),
1763 BPF_EXIT_INSN(),
1764 },
1765 .errstr = "invalid bpf_context access",
1766 .result = REJECT,
1767 },
1768 {
1769 "check skb->hash byte load not permitted 3",
1770 .insns = {
1771 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001772#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001773 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1774 offsetof(struct __sk_buff, hash) + 3),
1775#else
1776 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1777 offsetof(struct __sk_buff, hash)),
1778#endif
1779 BPF_EXIT_INSN(),
1780 },
1781 .errstr = "invalid bpf_context access",
1782 .result = REJECT,
1783 },
1784 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001785 "check cb access: byte, wrong type",
1786 .insns = {
1787 BPF_MOV64_IMM(BPF_REG_0, 0),
1788 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001789 offsetof(struct __sk_buff, cb[0])),
1790 BPF_EXIT_INSN(),
1791 },
1792 .errstr = "invalid bpf_context access",
1793 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001794 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1795 },
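	/*
	 * The "wrong type" variants load the same cb[] store as
	 * BPF_PROG_TYPE_CGROUP_SOCK, whose context is struct bpf_sock
	 * rather than struct __sk_buff, so the __sk_buff offsets turn into
	 * invalid context accesses.
	 */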
1796 {
1797 "check cb access: half",
1798 .insns = {
1799 BPF_MOV64_IMM(BPF_REG_0, 0),
1800 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1801 offsetof(struct __sk_buff, cb[0])),
1802 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1803 offsetof(struct __sk_buff, cb[0]) + 2),
1804 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1805 offsetof(struct __sk_buff, cb[1])),
1806 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1807 offsetof(struct __sk_buff, cb[1]) + 2),
1808 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1809 offsetof(struct __sk_buff, cb[2])),
1810 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1811 offsetof(struct __sk_buff, cb[2]) + 2),
1812 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1813 offsetof(struct __sk_buff, cb[3])),
1814 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1815 offsetof(struct __sk_buff, cb[3]) + 2),
1816 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1817 offsetof(struct __sk_buff, cb[4])),
1818 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1819 offsetof(struct __sk_buff, cb[4]) + 2),
1820 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1821 offsetof(struct __sk_buff, cb[0])),
1822 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1823 offsetof(struct __sk_buff, cb[0]) + 2),
1824 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1825 offsetof(struct __sk_buff, cb[1])),
1826 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1827 offsetof(struct __sk_buff, cb[1]) + 2),
1828 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1829 offsetof(struct __sk_buff, cb[2])),
1830 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1831 offsetof(struct __sk_buff, cb[2]) + 2),
1832 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1833 offsetof(struct __sk_buff, cb[3])),
1834 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1835 offsetof(struct __sk_buff, cb[3]) + 2),
1836 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1837 offsetof(struct __sk_buff, cb[4])),
1838 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1839 offsetof(struct __sk_buff, cb[4]) + 2),
1840 BPF_EXIT_INSN(),
1841 },
1842 .result = ACCEPT,
1843 },
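	/*
	 * The "unaligned" cb[] tests carry F_LOAD_WITH_STRICT_ALIGNMENT,
	 * i.e. they are loaded with BPF_F_STRICT_ALIGNMENT, so misaligned
	 * context accesses are rejected even on architectures that handle
	 * unaligned access efficiently.
	 */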
1844 {
1845 "check cb access: half, unaligned",
1846 .insns = {
1847 BPF_MOV64_IMM(BPF_REG_0, 0),
1848 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1849 offsetof(struct __sk_buff, cb[0]) + 1),
1850 BPF_EXIT_INSN(),
1851 },
Edward Creef65b1842017-08-07 15:27:12 +01001852 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001853 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001854 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001855 },
1856 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001857 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001858 .insns = {
1859 BPF_MOV64_IMM(BPF_REG_0, 0),
1860 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001861 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001862 BPF_EXIT_INSN(),
1863 },
1864 .errstr = "invalid bpf_context access",
1865 .result = REJECT,
1866 },
1867 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001868 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001869 .insns = {
1870 BPF_MOV64_IMM(BPF_REG_0, 0),
1871 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001872 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001873 BPF_EXIT_INSN(),
1874 },
1875 .errstr = "invalid bpf_context access",
1876 .result = REJECT,
1877 },
1878 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001879 "check skb->hash half load permitted",
1880 .insns = {
1881 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001882#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001883 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1884 offsetof(struct __sk_buff, hash)),
1885#else
1886 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1887 offsetof(struct __sk_buff, hash) + 2),
1888#endif
1889 BPF_EXIT_INSN(),
1890 },
1891 .result = ACCEPT,
1892 },
1893 {
1894 "check skb->hash half load not permitted",
1895 .insns = {
1896 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001897#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001898 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1899 offsetof(struct __sk_buff, hash) + 2),
1900#else
1901 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1902 offsetof(struct __sk_buff, hash)),
1903#endif
1904 BPF_EXIT_INSN(),
1905 },
1906 .errstr = "invalid bpf_context access",
1907 .result = REJECT,
1908 },
1909 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001910 "check cb access: half, wrong type",
1911 .insns = {
1912 BPF_MOV64_IMM(BPF_REG_0, 0),
1913 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1914 offsetof(struct __sk_buff, cb[0])),
1915 BPF_EXIT_INSN(),
1916 },
1917 .errstr = "invalid bpf_context access",
1918 .result = REJECT,
1919 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1920 },
1921 {
1922 "check cb access: word",
1923 .insns = {
1924 BPF_MOV64_IMM(BPF_REG_0, 0),
1925 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1926 offsetof(struct __sk_buff, cb[0])),
1927 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1928 offsetof(struct __sk_buff, cb[1])),
1929 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1930 offsetof(struct __sk_buff, cb[2])),
1931 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1932 offsetof(struct __sk_buff, cb[3])),
1933 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1934 offsetof(struct __sk_buff, cb[4])),
1935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1936 offsetof(struct __sk_buff, cb[0])),
1937 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1938 offsetof(struct __sk_buff, cb[1])),
1939 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1940 offsetof(struct __sk_buff, cb[2])),
1941 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1942 offsetof(struct __sk_buff, cb[3])),
1943 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1944 offsetof(struct __sk_buff, cb[4])),
1945 BPF_EXIT_INSN(),
1946 },
1947 .result = ACCEPT,
1948 },
1949 {
1950 "check cb access: word, unaligned 1",
1951 .insns = {
1952 BPF_MOV64_IMM(BPF_REG_0, 0),
1953 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1954 offsetof(struct __sk_buff, cb[0]) + 2),
1955 BPF_EXIT_INSN(),
1956 },
Edward Creef65b1842017-08-07 15:27:12 +01001957 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001958 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001959 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001960 },
1961 {
1962 "check cb access: word, unaligned 2",
1963 .insns = {
1964 BPF_MOV64_IMM(BPF_REG_0, 0),
1965 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1966 offsetof(struct __sk_buff, cb[4]) + 1),
1967 BPF_EXIT_INSN(),
1968 },
Edward Creef65b1842017-08-07 15:27:12 +01001969 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001970 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001971 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001972 },
1973 {
1974 "check cb access: word, unaligned 3",
1975 .insns = {
1976 BPF_MOV64_IMM(BPF_REG_0, 0),
1977 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1978 offsetof(struct __sk_buff, cb[4]) + 2),
1979 BPF_EXIT_INSN(),
1980 },
Edward Creef65b1842017-08-07 15:27:12 +01001981 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001982 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001983 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001984 },
1985 {
1986 "check cb access: word, unaligned 4",
1987 .insns = {
1988 BPF_MOV64_IMM(BPF_REG_0, 0),
1989 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1990 offsetof(struct __sk_buff, cb[4]) + 3),
1991 BPF_EXIT_INSN(),
1992 },
Edward Creef65b1842017-08-07 15:27:12 +01001993 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001994 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001995 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001996 },
1997 {
1998 "check cb access: double",
1999 .insns = {
2000 BPF_MOV64_IMM(BPF_REG_0, 0),
2001 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2002 offsetof(struct __sk_buff, cb[0])),
2003 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2004 offsetof(struct __sk_buff, cb[2])),
2005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2006 offsetof(struct __sk_buff, cb[0])),
2007 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2008 offsetof(struct __sk_buff, cb[2])),
2009 BPF_EXIT_INSN(),
2010 },
2011 .result = ACCEPT,
2012 },
2013 {
2014 "check cb access: double, unaligned 1",
2015 .insns = {
2016 BPF_MOV64_IMM(BPF_REG_0, 0),
2017 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2018 offsetof(struct __sk_buff, cb[1])),
2019 BPF_EXIT_INSN(),
2020 },
Edward Creef65b1842017-08-07 15:27:12 +01002021 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002022 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002023 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002024 },
2025 {
2026 "check cb access: double, unaligned 2",
2027 .insns = {
2028 BPF_MOV64_IMM(BPF_REG_0, 0),
2029 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2030 offsetof(struct __sk_buff, cb[3])),
2031 BPF_EXIT_INSN(),
2032 },
Edward Creef65b1842017-08-07 15:27:12 +01002033 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002034 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002035 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01002036 },
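	/*
	 * cb[] ends after cb[4]: a double-word access based at cb[4] would
	 * spill past the array, and an 8-byte load or store of the 4-byte
	 * ifindex field does not match any context field either, so all of
	 * these must be rejected as invalid context accesses.
	 */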
2037 {
2038 "check cb access: double, oob 1",
2039 .insns = {
2040 BPF_MOV64_IMM(BPF_REG_0, 0),
2041 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2042 offsetof(struct __sk_buff, cb[4])),
2043 BPF_EXIT_INSN(),
2044 },
2045 .errstr = "invalid bpf_context access",
2046 .result = REJECT,
2047 },
2048 {
2049 "check cb access: double, oob 2",
2050 .insns = {
2051 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002052 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2053 offsetof(struct __sk_buff, cb[4])),
2054 BPF_EXIT_INSN(),
2055 },
2056 .errstr = "invalid bpf_context access",
2057 .result = REJECT,
2058 },
2059 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002060 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002061 .insns = {
2062 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07002063 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2064 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002065 BPF_EXIT_INSN(),
2066 },
2067 .errstr = "invalid bpf_context access",
2068 .result = REJECT,
2069 },
2070 {
Yonghong Song31fd8582017-06-13 15:52:13 -07002071 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01002072 .insns = {
2073 BPF_MOV64_IMM(BPF_REG_0, 0),
2074 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07002075 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01002076 BPF_EXIT_INSN(),
2077 },
2078 .errstr = "invalid bpf_context access",
2079 .result = REJECT,
2080 },
2081 {
2082 "check cb access: double, wrong type",
2083 .insns = {
2084 BPF_MOV64_IMM(BPF_REG_0, 0),
2085 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2086 offsetof(struct __sk_buff, cb[0])),
2087 BPF_EXIT_INSN(),
2088 },
2089 .errstr = "invalid bpf_context access",
2090 .result = REJECT,
2091 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002092 },
2093 {
2094 "check out of range skb->cb access",
2095 .insns = {
2096 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002097 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002098 BPF_EXIT_INSN(),
2099 },
2100 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002101 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002102 .result = REJECT,
2103 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
2104 },
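	/*
	 * Writable __sk_buff fields depend on the program type: cb[] can
	 * be written by socket filters, while fields like mark and
	 * tc_index are writable from tc (sched_cls/sched_act) programs but
	 * not from plain socket filters.  The unpriv failure below comes
	 * from storing R1, the context pointer, into cb[], which would
	 * leak a kernel address.
	 */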
2105 {
2106 "write skb fields from socket prog",
2107 .insns = {
2108 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2109 offsetof(struct __sk_buff, cb[4])),
2110 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2111 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2112 offsetof(struct __sk_buff, mark)),
2113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2114 offsetof(struct __sk_buff, tc_index)),
2115 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2116 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2117 offsetof(struct __sk_buff, cb[0])),
2118 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2119 offsetof(struct __sk_buff, cb[2])),
2120 BPF_EXIT_INSN(),
2121 },
2122 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002123 .errstr_unpriv = "R1 leaks addr",
2124 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002125 },
2126 {
2127 "write skb fields from tc_cls_act prog",
2128 .insns = {
2129 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2130 offsetof(struct __sk_buff, cb[0])),
2131 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2132 offsetof(struct __sk_buff, mark)),
2133 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2134 offsetof(struct __sk_buff, tc_index)),
2135 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2136 offsetof(struct __sk_buff, tc_index)),
2137 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2138 offsetof(struct __sk_buff, cb[3])),
2139 BPF_EXIT_INSN(),
2140 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002141 .errstr_unpriv = "",
2142 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07002143 .result = ACCEPT,
2144 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2145 },
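	/*
	 * PTR_TO_STACK accesses: loads and stores relative to the frame
	 * pointer must stay inside the 512-byte BPF stack and, at least
	 * for the 8-byte accesses used here, be properly aligned.
	 * Illustrative sketch in eBPF pseudo assembly:
	 *
	 *	r1 = r10			// frame pointer
	 *	r1 += -8
	 *	*(u64 *)(r1 + 0) = 0xfaceb00c	// in bounds, 8-byte aligned
	 */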
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002146 {
2147 "PTR_TO_STACK store/load",
2148 .insns = {
2149 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2151 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2152 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2153 BPF_EXIT_INSN(),
2154 },
2155 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002156 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002157 },
2158 {
2159 "PTR_TO_STACK store/load - bad alignment on off",
2160 .insns = {
2161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2163 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2164 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2165 BPF_EXIT_INSN(),
2166 },
2167 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002168 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002169 },
2170 {
2171 "PTR_TO_STACK store/load - bad alignment on reg",
2172 .insns = {
2173 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2175 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2176 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2177 BPF_EXIT_INSN(),
2178 },
2179 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002180 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07002181 },
2182 {
2183 "PTR_TO_STACK store/load - out of bounds low",
2184 .insns = {
2185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2187 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2188 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2189 BPF_EXIT_INSN(),
2190 },
2191 .result = REJECT,
2192 .errstr = "invalid stack off=-79992 size=8",
2193 },
2194 {
2195 "PTR_TO_STACK store/load - out of bounds high",
2196 .insns = {
2197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2199 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2200 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2201 BPF_EXIT_INSN(),
2202 },
2203 .result = REJECT,
2204 .errstr = "invalid stack off=0 size=8",
2205 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002206 {
2207 "unpriv: return pointer",
2208 .insns = {
2209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2210 BPF_EXIT_INSN(),
2211 },
2212 .result = ACCEPT,
2213 .result_unpriv = REJECT,
2214 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002215 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002216 },
2217 {
2218 "unpriv: add const to pointer",
2219 .insns = {
2220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2221 BPF_MOV64_IMM(BPF_REG_0, 0),
2222 BPF_EXIT_INSN(),
2223 },
2224 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002225 },
2226 {
2227 "unpriv: add pointer to pointer",
2228 .insns = {
2229 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2230 BPF_MOV64_IMM(BPF_REG_0, 0),
2231 BPF_EXIT_INSN(),
2232 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002233 .result = REJECT,
2234 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002235 },
2236 {
2237 "unpriv: neg pointer",
2238 .insns = {
2239 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2240 BPF_MOV64_IMM(BPF_REG_0, 0),
2241 BPF_EXIT_INSN(),
2242 },
2243 .result = ACCEPT,
2244 .result_unpriv = REJECT,
2245 .errstr_unpriv = "R1 pointer arithmetic",
2246 },
2247 {
2248 "unpriv: cmp pointer with const",
2249 .insns = {
2250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2251 BPF_MOV64_IMM(BPF_REG_0, 0),
2252 BPF_EXIT_INSN(),
2253 },
2254 .result = ACCEPT,
2255 .result_unpriv = REJECT,
2256 .errstr_unpriv = "R1 pointer comparison",
2257 },
2258 {
2259 "unpriv: cmp pointer with pointer",
2260 .insns = {
2261 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2262 BPF_MOV64_IMM(BPF_REG_0, 0),
2263 BPF_EXIT_INSN(),
2264 },
2265 .result = ACCEPT,
2266 .result_unpriv = REJECT,
2267 .errstr_unpriv = "R10 pointer comparison",
2268 },
2269 {
2270 "unpriv: check that printk is disallowed",
2271 .insns = {
2272 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2273 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2275 BPF_MOV64_IMM(BPF_REG_2, 8),
2276 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2278 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002279 BPF_MOV64_IMM(BPF_REG_0, 0),
2280 BPF_EXIT_INSN(),
2281 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01002282 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002283 .result_unpriv = REJECT,
2284 .result = ACCEPT,
2285 },
2286 {
2287 "unpriv: pass pointer to helper function",
2288 .insns = {
2289 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2292 BPF_LD_MAP_FD(BPF_REG_1, 0),
2293 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2294 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002295 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2296 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002297 BPF_MOV64_IMM(BPF_REG_0, 0),
2298 BPF_EXIT_INSN(),
2299 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002300 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002301 .errstr_unpriv = "R4 leaks addr",
2302 .result_unpriv = REJECT,
2303 .result = ACCEPT,
2304 },
2305 {
2306 "unpriv: indirectly pass pointer on stack to helper function",
2307 .insns = {
2308 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2311 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2313 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002314 BPF_MOV64_IMM(BPF_REG_0, 0),
2315 BPF_EXIT_INSN(),
2316 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002317 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002318 .errstr = "invalid indirect read from stack off -8+0 size 8",
2319 .result = REJECT,
2320 },
2321 {
2322 "unpriv: mangle pointer on stack 1",
2323 .insns = {
2324 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2325 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2326 BPF_MOV64_IMM(BPF_REG_0, 0),
2327 BPF_EXIT_INSN(),
2328 },
2329 .errstr_unpriv = "attempt to corrupt spilled",
2330 .result_unpriv = REJECT,
2331 .result = ACCEPT,
2332 },
2333 {
2334 "unpriv: mangle pointer on stack 2",
2335 .insns = {
2336 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2337 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2338 BPF_MOV64_IMM(BPF_REG_0, 0),
2339 BPF_EXIT_INSN(),
2340 },
2341 .errstr_unpriv = "attempt to corrupt spilled",
2342 .result_unpriv = REJECT,
2343 .result = ACCEPT,
2344 },
2345 {
2346 "unpriv: read pointer from stack in small chunks",
2347 .insns = {
2348 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2349 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2350 BPF_MOV64_IMM(BPF_REG_0, 0),
2351 BPF_EXIT_INSN(),
2352 },
2353 .errstr = "invalid size",
2354 .result = REJECT,
2355 },
2356 {
2357 "unpriv: write pointer into ctx",
2358 .insns = {
2359 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2360 BPF_MOV64_IMM(BPF_REG_0, 0),
2361 BPF_EXIT_INSN(),
2362 },
2363 .errstr_unpriv = "R1 leaks addr",
2364 .result_unpriv = REJECT,
2365 .errstr = "invalid bpf_context access",
2366 .result = REJECT,
2367 },
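	/*
	 * Spill/fill tracking: storing a pointer register into an 8-byte
	 * stack slot and loading it back must preserve its type, so the
	 * refilled value can still be used as a context pointer.  If the
	 * slot is clobbered (overwritten with the frame pointer or
	 * modified via XADD), the refilled register degrades and a later
	 * helper call that expects ctx must be rejected.
	 */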
2368 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002369 "unpriv: spill/fill of ctx",
2370 .insns = {
2371 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2373 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2374 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2375 BPF_MOV64_IMM(BPF_REG_0, 0),
2376 BPF_EXIT_INSN(),
2377 },
2378 .result = ACCEPT,
2379 },
2380 {
2381 "unpriv: spill/fill of ctx 2",
2382 .insns = {
2383 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2384 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2385 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2386 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2388 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002389 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002390 BPF_EXIT_INSN(),
2391 },
2392 .result = ACCEPT,
2393 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2394 },
2395 {
2396 "unpriv: spill/fill of ctx 3",
2397 .insns = {
2398 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2400 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2401 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2402 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2404 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002405 BPF_EXIT_INSN(),
2406 },
2407 .result = REJECT,
2408 .errstr = "R1 type=fp expected=ctx",
2409 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2410 },
2411 {
2412 "unpriv: spill/fill of ctx 4",
2413 .insns = {
2414 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2416 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2417 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002418 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2419 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002420 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2422 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002423 BPF_EXIT_INSN(),
2424 },
2425 .result = REJECT,
2426 .errstr = "R1 type=inv expected=ctx",
2427 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2428 },
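	/*
	 * A single load/store instruction may be reached with different
	 * pointer types in the same register depending on the path taken;
	 * since context accesses are rewritten at verification time, this
	 * has to be rejected ("same insn cannot be used with different
	 * pointers").
	 */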
2429 {
2430 "unpriv: spill/fill of different pointers stx",
2431 .insns = {
2432 BPF_MOV64_IMM(BPF_REG_3, 42),
2433 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2435 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2436 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2439 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2440 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2441 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2442 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2443 offsetof(struct __sk_buff, mark)),
2444 BPF_MOV64_IMM(BPF_REG_0, 0),
2445 BPF_EXIT_INSN(),
2446 },
2447 .result = REJECT,
2448 .errstr = "same insn cannot be used with different pointers",
2449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2450 },
2451 {
2452 "unpriv: spill/fill of different pointers ldx",
2453 .insns = {
2454 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2457 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2459 -(__s32)offsetof(struct bpf_perf_event_data,
2460 sample_period) - 8),
2461 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2462 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2463 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2464 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2465 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2466 offsetof(struct bpf_perf_event_data,
2467 sample_period)),
2468 BPF_MOV64_IMM(BPF_REG_0, 0),
2469 BPF_EXIT_INSN(),
2470 },
2471 .result = REJECT,
2472 .errstr = "same insn cannot be used with different pointers",
2473 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2474 },
2475 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002476 "unpriv: write pointer into map elem value",
2477 .insns = {
2478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2481 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2483 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2485 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2486 BPF_EXIT_INSN(),
2487 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002488 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002489 .errstr_unpriv = "R0 leaks addr",
2490 .result_unpriv = REJECT,
2491 .result = ACCEPT,
2492 },
2493 {
2494 "unpriv: partial copy of pointer",
2495 .insns = {
2496 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2497 BPF_MOV64_IMM(BPF_REG_0, 0),
2498 BPF_EXIT_INSN(),
2499 },
2500 .errstr_unpriv = "R10 partial copy",
2501 .result_unpriv = REJECT,
2502 .result = ACCEPT,
2503 },
2504 {
2505 "unpriv: pass pointer to tail_call",
2506 .insns = {
2507 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2508 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2510 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002511 BPF_MOV64_IMM(BPF_REG_0, 0),
2512 BPF_EXIT_INSN(),
2513 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002514 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002515 .errstr_unpriv = "R3 leaks addr into helper",
2516 .result_unpriv = REJECT,
2517 .result = ACCEPT,
2518 },
2519 {
2520 "unpriv: cmp map pointer with zero",
2521 .insns = {
2522 BPF_MOV64_IMM(BPF_REG_1, 0),
2523 BPF_LD_MAP_FD(BPF_REG_1, 0),
2524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2525 BPF_MOV64_IMM(BPF_REG_0, 0),
2526 BPF_EXIT_INSN(),
2527 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002528 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002529 .errstr_unpriv = "R1 pointer comparison",
2530 .result_unpriv = REJECT,
2531 .result = ACCEPT,
2532 },
2533 {
2534 "unpriv: write into frame pointer",
2535 .insns = {
2536 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2537 BPF_MOV64_IMM(BPF_REG_0, 0),
2538 BPF_EXIT_INSN(),
2539 },
2540 .errstr = "frame pointer is read only",
2541 .result = REJECT,
2542 },
2543 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002544 "unpriv: spill/fill frame pointer",
2545 .insns = {
2546 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2548 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2549 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2550 BPF_MOV64_IMM(BPF_REG_0, 0),
2551 BPF_EXIT_INSN(),
2552 },
2553 .errstr = "frame pointer is read only",
2554 .result = REJECT,
2555 },
2556 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002557 "unpriv: cmp of frame pointer",
2558 .insns = {
2559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2560 BPF_MOV64_IMM(BPF_REG_0, 0),
2561 BPF_EXIT_INSN(),
2562 },
2563 .errstr_unpriv = "R10 pointer comparison",
2564 .result_unpriv = REJECT,
2565 .result = ACCEPT,
2566 },
2567 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002568 "unpriv: adding of fp",
2569 .insns = {
2570 BPF_MOV64_IMM(BPF_REG_0, 0),
2571 BPF_MOV64_IMM(BPF_REG_1, 0),
2572 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2573 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2574 BPF_EXIT_INSN(),
2575 },
Edward Creef65b1842017-08-07 15:27:12 +01002576 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002577 },
2578 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002579 "unpriv: cmp of stack pointer",
2580 .insns = {
2581 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2583 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2584 BPF_MOV64_IMM(BPF_REG_0, 0),
2585 BPF_EXIT_INSN(),
2586 },
2587 .errstr_unpriv = "R2 pointer comparison",
2588 .result_unpriv = REJECT,
2589 .result = ACCEPT,
2590 },
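	/*
	 * The runtime/jit tail_call tests are accepted by the verifier and
	 * are meant to be exercised at run time as well: an out-of-range
	 * or wider-than-32-bit index is expected to make bpf_tail_call()
	 * fall through harmlessly, so the program returns 0 instead of
	 * crashing the interpreter or JIT.
	 */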
2591 {
Daniel Borkmann16338a92018-02-23 01:03:43 +01002592 "runtime/jit: pass negative index to tail_call",
2593 .insns = {
2594 BPF_MOV64_IMM(BPF_REG_3, -1),
2595 BPF_LD_MAP_FD(BPF_REG_2, 0),
2596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2597 BPF_FUNC_tail_call),
2598 BPF_MOV64_IMM(BPF_REG_0, 0),
2599 BPF_EXIT_INSN(),
2600 },
2601 .fixup_prog = { 1 },
2602 .result = ACCEPT,
2603 },
2604 {
2605 "runtime/jit: pass > 32bit index to tail_call",
2606 .insns = {
2607 BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
2608 BPF_LD_MAP_FD(BPF_REG_2, 0),
2609 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2610 BPF_FUNC_tail_call),
2611 BPF_MOV64_IMM(BPF_REG_0, 0),
2612 BPF_EXIT_INSN(),
2613 },
2614 .fixup_prog = { 2 },
2615 .result = ACCEPT,
2616 },
2617 {
Yonghong Song332270f2017-04-29 22:52:42 -07002618 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002619 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002620 BPF_MOV64_IMM(BPF_REG_1, 4),
2621 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2622 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2626 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2627 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2628 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2630 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002631 BPF_MOV64_IMM(BPF_REG_0, 0),
2632 BPF_EXIT_INSN(),
2633 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002634 .result = ACCEPT,
2635 },
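	/*
	 * raw_stack tests: bpf_skb_load_bytes() writes into caller-provided
	 * stack memory, so the verifier must check that the buffer pointer
	 * and length describe a valid, in-bounds stack area, but it does
	 * not require the bytes to be initialized first.  Illustrative
	 * restricted-C sketch of the helper usage being modelled:
	 *
	 *	char buf[8];
	 *
	 *	if (bpf_skb_load_bytes(skb, 4, buf, sizeof(buf)) < 0)
	 *		return 0;
	 */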
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002636 {
2637 "raw_stack: no skb_load_bytes",
2638 .insns = {
2639 BPF_MOV64_IMM(BPF_REG_2, 4),
2640 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2642 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2643 BPF_MOV64_IMM(BPF_REG_4, 8),
2644 /* Call to skb_load_bytes() omitted. */
2645 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2646 BPF_EXIT_INSN(),
2647 },
2648 .result = REJECT,
2649 .errstr = "invalid read from stack off -8+0 size 8",
2650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2651 },
2652 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002653 "raw_stack: skb_load_bytes, negative len",
2654 .insns = {
2655 BPF_MOV64_IMM(BPF_REG_2, 4),
2656 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2658 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2659 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2661 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002662 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2663 BPF_EXIT_INSN(),
2664 },
2665 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002666 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002667 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2668 },
2669 {
2670 "raw_stack: skb_load_bytes, negative len 2",
2671 .insns = {
2672 BPF_MOV64_IMM(BPF_REG_2, 4),
2673 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2675 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2676 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2678 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002679 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2680 BPF_EXIT_INSN(),
2681 },
2682 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002683 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002684 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2685 },
2686 {
2687 "raw_stack: skb_load_bytes, zero len",
2688 .insns = {
2689 BPF_MOV64_IMM(BPF_REG_2, 4),
2690 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2692 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2693 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002694 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2695 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002696 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2697 BPF_EXIT_INSN(),
2698 },
2699 .result = REJECT,
2700 .errstr = "invalid stack type R3",
2701 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2702 },
2703 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002704 "raw_stack: skb_load_bytes, no init",
2705 .insns = {
2706 BPF_MOV64_IMM(BPF_REG_2, 4),
2707 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2709 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2710 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2712 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002713 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2714 BPF_EXIT_INSN(),
2715 },
2716 .result = ACCEPT,
2717 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2718 },
2719 {
2720 "raw_stack: skb_load_bytes, init",
2721 .insns = {
2722 BPF_MOV64_IMM(BPF_REG_2, 4),
2723 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2725 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2726 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2727 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2729 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002730 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2731 BPF_EXIT_INSN(),
2732 },
2733 .result = ACCEPT,
2734 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2735 },
2736 {
2737 "raw_stack: skb_load_bytes, spilled regs around bounds",
2738 .insns = {
2739 BPF_MOV64_IMM(BPF_REG_2, 4),
2740 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002742 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2743 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002744 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2745 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2747 BPF_FUNC_skb_load_bytes),
2748 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2749 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002750 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2751 offsetof(struct __sk_buff, mark)),
2752 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2753 offsetof(struct __sk_buff, priority)),
2754 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2755 BPF_EXIT_INSN(),
2756 },
2757 .result = ACCEPT,
2758 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2759 },
2760 {
2761 "raw_stack: skb_load_bytes, spilled regs corruption",
2762 .insns = {
2763 BPF_MOV64_IMM(BPF_REG_2, 4),
2764 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002766 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002767 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2768 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2770 BPF_FUNC_skb_load_bytes),
2771 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002772 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2773 offsetof(struct __sk_buff, mark)),
2774 BPF_EXIT_INSN(),
2775 },
2776 .result = REJECT,
2777 .errstr = "R0 invalid mem access 'inv'",
2778 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2779 },
2780 {
2781 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2782 .insns = {
2783 BPF_MOV64_IMM(BPF_REG_2, 4),
2784 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002786 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2787 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2788 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002789 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2790 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002791 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2792 BPF_FUNC_skb_load_bytes),
2793 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2794 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2795 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2797 offsetof(struct __sk_buff, mark)),
2798 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2799 offsetof(struct __sk_buff, priority)),
2800 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2802 offsetof(struct __sk_buff, pkt_type)),
2803 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2804 BPF_EXIT_INSN(),
2805 },
2806 .result = REJECT,
2807 .errstr = "R3 invalid mem access 'inv'",
2808 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2809 },
2810 {
2811 "raw_stack: skb_load_bytes, spilled regs + data",
2812 .insns = {
2813 BPF_MOV64_IMM(BPF_REG_2, 4),
2814 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002816 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2817 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2818 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002819 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2820 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002821 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2822 BPF_FUNC_skb_load_bytes),
2823 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2824 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2825 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002826 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2827 offsetof(struct __sk_buff, mark)),
2828 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2829 offsetof(struct __sk_buff, priority)),
2830 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2831 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2832 BPF_EXIT_INSN(),
2833 },
2834 .result = ACCEPT,
2835 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2836 },
2837 {
2838 "raw_stack: skb_load_bytes, invalid access 1",
2839 .insns = {
2840 BPF_MOV64_IMM(BPF_REG_2, 4),
2841 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2843 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2844 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2846 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002847 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2848 BPF_EXIT_INSN(),
2849 },
2850 .result = REJECT,
2851 .errstr = "invalid stack type R3 off=-513 access_size=8",
2852 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2853 },
2854 {
2855 "raw_stack: skb_load_bytes, invalid access 2",
2856 .insns = {
2857 BPF_MOV64_IMM(BPF_REG_2, 4),
2858 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2860 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2861 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2863 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002864 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2865 BPF_EXIT_INSN(),
2866 },
2867 .result = REJECT,
2868 .errstr = "invalid stack type R3 off=-1 access_size=8",
2869 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2870 },
2871 {
2872 "raw_stack: skb_load_bytes, invalid access 3",
2873 .insns = {
2874 BPF_MOV64_IMM(BPF_REG_2, 4),
2875 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2877 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2878 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2880 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002881 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2882 BPF_EXIT_INSN(),
2883 },
2884 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002885 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002886 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2887 },
2888 {
2889 "raw_stack: skb_load_bytes, invalid access 4",
2890 .insns = {
2891 BPF_MOV64_IMM(BPF_REG_2, 4),
2892 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2894 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2895 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2897 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002898 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2899 BPF_EXIT_INSN(),
2900 },
2901 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002902 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002903 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2904 },
2905 {
2906 "raw_stack: skb_load_bytes, invalid access 5",
2907 .insns = {
2908 BPF_MOV64_IMM(BPF_REG_2, 4),
2909 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2911 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2912 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2914 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002915 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2916 BPF_EXIT_INSN(),
2917 },
2918 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002919 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002920 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2921 },
2922 {
2923 "raw_stack: skb_load_bytes, invalid access 6",
2924 .insns = {
2925 BPF_MOV64_IMM(BPF_REG_2, 4),
2926 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2928 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2929 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002930 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2931 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002932 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2933 BPF_EXIT_INSN(),
2934 },
2935 .result = REJECT,
2936 .errstr = "invalid stack type R3 off=-512 access_size=0",
2937 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2938 },
2939 {
2940 "raw_stack: skb_load_bytes, large access",
2941 .insns = {
2942 BPF_MOV64_IMM(BPF_REG_2, 4),
2943 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2945 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2946 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2948 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002949 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2950 BPF_EXIT_INSN(),
2951 },
2952 .result = ACCEPT,
2953 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2954 },
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002955 {
Daniel Borkmannf37a8cb2018-01-16 23:30:10 +01002956 "context stores via ST",
2957 .insns = {
2958 BPF_MOV64_IMM(BPF_REG_0, 0),
2959 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
2960 BPF_EXIT_INSN(),
2961 },
2962 .errstr = "BPF_ST stores into R1 context is not allowed",
2963 .result = REJECT,
2964 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2965 },
2966 {
2967 "context stores via XADD",
2968 .insns = {
2969 BPF_MOV64_IMM(BPF_REG_0, 0),
2970 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
2971 BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
2972 BPF_EXIT_INSN(),
2973 },
2974 .errstr = "BPF_XADD stores into R1 context is not allowed",
2975 .result = REJECT,
2976 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2977 },
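	/*
	 * Direct packet access: a packet pointer derived from skb->data
	 * may only be dereferenced after it has been bounds-checked
	 * against skb->data_end on the current path.  The canonical
	 * restricted-C pattern these tests encode is (illustrative sketch
	 * only):
	 *
	 *	void *data     = (void *)(long)skb->data;
	 *	void *data_end = (void *)(long)skb->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return 0;
	 *	return *(__u8 *)data;
	 */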
2978 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002979 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002980 .insns = {
2981 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2982 offsetof(struct __sk_buff, data)),
2983 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2984 offsetof(struct __sk_buff, data_end)),
2985 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2987 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2988 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2989 BPF_MOV64_IMM(BPF_REG_0, 0),
2990 BPF_EXIT_INSN(),
2991 },
2992 .result = ACCEPT,
2993 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2994 },
2995 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002996 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002997 .insns = {
2998 BPF_MOV64_IMM(BPF_REG_0, 1),
2999 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3000 offsetof(struct __sk_buff, data_end)),
3001 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3002 offsetof(struct __sk_buff, data)),
3003 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3005 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3006 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3007 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3008 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3009 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3010 offsetof(struct __sk_buff, data)),
3011 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003012 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3013 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01003014 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3015 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003016 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3019 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3020 offsetof(struct __sk_buff, data_end)),
3021 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3022 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3023 BPF_MOV64_IMM(BPF_REG_0, 0),
3024 BPF_EXIT_INSN(),
3025 },
3026 .result = ACCEPT,
3027 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3028 },
3029 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003030 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003031 .insns = {
3032 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3033 offsetof(struct __sk_buff, data)),
3034 BPF_MOV64_IMM(BPF_REG_0, 0),
3035 BPF_EXIT_INSN(),
3036 },
3037 .errstr = "invalid bpf_context access off=76",
3038 .result = REJECT,
3039 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3040 },
3041 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003042 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003043 .insns = {
3044 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3045 offsetof(struct __sk_buff, data)),
3046 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3047 offsetof(struct __sk_buff, data_end)),
3048 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3050 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3051 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3052 BPF_MOV64_IMM(BPF_REG_0, 0),
3053 BPF_EXIT_INSN(),
3054 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003055 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07003056 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3057 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003058 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02003059 "direct packet access: test5 (pkt_end >= reg, good access)",
3060 .insns = {
3061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3062 offsetof(struct __sk_buff, data)),
3063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3064 offsetof(struct __sk_buff, data_end)),
3065 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3067 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3068 BPF_MOV64_IMM(BPF_REG_0, 1),
3069 BPF_EXIT_INSN(),
3070 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3071 BPF_MOV64_IMM(BPF_REG_0, 0),
3072 BPF_EXIT_INSN(),
3073 },
3074 .result = ACCEPT,
3075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3076 },
3077 {
3078 "direct packet access: test6 (pkt_end >= reg, bad access)",
3079 .insns = {
3080 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3081 offsetof(struct __sk_buff, data)),
3082 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3083 offsetof(struct __sk_buff, data_end)),
3084 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3086 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3087 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3088 BPF_MOV64_IMM(BPF_REG_0, 1),
3089 BPF_EXIT_INSN(),
3090 BPF_MOV64_IMM(BPF_REG_0, 0),
3091 BPF_EXIT_INSN(),
3092 },
3093 .errstr = "invalid access to packet",
3094 .result = REJECT,
3095 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3096 },
3097 {
3098 "direct packet access: test7 (pkt_end >= reg, both accesses)",
3099 .insns = {
3100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3101 offsetof(struct __sk_buff, data)),
3102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3103 offsetof(struct __sk_buff, data_end)),
3104 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3106 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3107 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3108 BPF_MOV64_IMM(BPF_REG_0, 1),
3109 BPF_EXIT_INSN(),
3110 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3111 BPF_MOV64_IMM(BPF_REG_0, 0),
3112 BPF_EXIT_INSN(),
3113 },
3114 .errstr = "invalid access to packet",
3115 .result = REJECT,
3116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3117 },
3118 {
3119 "direct packet access: test8 (double test, variant 1)",
3120 .insns = {
3121 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3122 offsetof(struct __sk_buff, data)),
3123 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3124 offsetof(struct __sk_buff, data_end)),
3125 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3127 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3128 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3129 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3130 BPF_MOV64_IMM(BPF_REG_0, 1),
3131 BPF_EXIT_INSN(),
3132 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3133 BPF_MOV64_IMM(BPF_REG_0, 0),
3134 BPF_EXIT_INSN(),
3135 },
3136 .result = ACCEPT,
3137 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3138 },
3139 {
3140 "direct packet access: test9 (double test, variant 2)",
3141 .insns = {
3142 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3143 offsetof(struct __sk_buff, data)),
3144 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3145 offsetof(struct __sk_buff, data_end)),
3146 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3148 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3149 BPF_MOV64_IMM(BPF_REG_0, 1),
3150 BPF_EXIT_INSN(),
3151 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3152 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3153 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3154 BPF_MOV64_IMM(BPF_REG_0, 0),
3155 BPF_EXIT_INSN(),
3156 },
3157 .result = ACCEPT,
3158 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3159 },
3160 {
3161 "direct packet access: test10 (write invalid)",
3162 .insns = {
3163 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3164 offsetof(struct __sk_buff, data)),
3165 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3166 offsetof(struct __sk_buff, data_end)),
3167 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3169 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3170 BPF_MOV64_IMM(BPF_REG_0, 0),
3171 BPF_EXIT_INSN(),
3172 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3173 BPF_MOV64_IMM(BPF_REG_0, 0),
3174 BPF_EXIT_INSN(),
3175 },
3176 .errstr = "invalid access to packet",
3177 .result = REJECT,
3178 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3179 },
3180 {
3181 "direct packet access: test11 (shift, good access)",
3182 .insns = {
3183 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3184 offsetof(struct __sk_buff, data)),
3185 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3186 offsetof(struct __sk_buff, data_end)),
3187 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3189 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3190 BPF_MOV64_IMM(BPF_REG_3, 144),
3191 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3193 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3194 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3195 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3196 BPF_MOV64_IMM(BPF_REG_0, 1),
3197 BPF_EXIT_INSN(),
3198 BPF_MOV64_IMM(BPF_REG_0, 0),
3199 BPF_EXIT_INSN(),
3200 },
3201 .result = ACCEPT,
3202 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3203 .retval = 1,
3204 },
3205 {
3206 "direct packet access: test12 (and, good access)",
3207 .insns = {
3208 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3209 offsetof(struct __sk_buff, data)),
3210 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3211 offsetof(struct __sk_buff, data_end)),
3212 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3214 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3215 BPF_MOV64_IMM(BPF_REG_3, 144),
3216 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3218 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3219 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3220 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3221 BPF_MOV64_IMM(BPF_REG_0, 1),
3222 BPF_EXIT_INSN(),
3223 BPF_MOV64_IMM(BPF_REG_0, 0),
3224 BPF_EXIT_INSN(),
3225 },
3226 .result = ACCEPT,
3227 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3228 .retval = 1,
3229 },
3230 {
3231 "direct packet access: test13 (branches, good access)",
3232 .insns = {
3233 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3234 offsetof(struct __sk_buff, data)),
3235 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3236 offsetof(struct __sk_buff, data_end)),
3237 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3239 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3240 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3241 offsetof(struct __sk_buff, mark)),
3242 BPF_MOV64_IMM(BPF_REG_4, 1),
3243 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3244 BPF_MOV64_IMM(BPF_REG_3, 14),
3245 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3246 BPF_MOV64_IMM(BPF_REG_3, 24),
3247 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3249 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3250 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3251 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3252 BPF_MOV64_IMM(BPF_REG_0, 1),
3253 BPF_EXIT_INSN(),
3254 BPF_MOV64_IMM(BPF_REG_0, 0),
3255 BPF_EXIT_INSN(),
3256 },
3257 .result = ACCEPT,
3258 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3259 .retval = 1,
3260 },
3261 {
3262 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3263 .insns = {
3264 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3265 offsetof(struct __sk_buff, data)),
3266 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3267 offsetof(struct __sk_buff, data_end)),
3268 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3270 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3271 BPF_MOV64_IMM(BPF_REG_5, 12),
3272 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3273 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3274 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3275 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3276 BPF_MOV64_IMM(BPF_REG_0, 1),
3277 BPF_EXIT_INSN(),
3278 BPF_MOV64_IMM(BPF_REG_0, 0),
3279 BPF_EXIT_INSN(),
3280 },
3281 .result = ACCEPT,
3282 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3283 .retval = 1,
3284 },
3285 {
3286 "direct packet access: test15 (spill with xadd)",
3287 .insns = {
3288 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3289 offsetof(struct __sk_buff, data)),
3290 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3291 offsetof(struct __sk_buff, data_end)),
3292 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3294 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3295 BPF_MOV64_IMM(BPF_REG_5, 4096),
3296 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3298 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3299 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3300 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3301 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3302 BPF_MOV64_IMM(BPF_REG_0, 0),
3303 BPF_EXIT_INSN(),
3304 },
3305 .errstr = "R2 invalid mem access 'inv'",
3306 .result = REJECT,
3307 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3308 },
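/*
 * test15 above spills the packet pointer to the stack, applies an atomic add
 * (BPF_STX_XADD) to that stack slot and reloads it.  The verifier cannot
 * track a pointer through an atomic read-modify-write, so the reloaded value
 * is downgraded to an unknown scalar ('inv') and the later store through r2
 * is rejected with the errstr below the next test.
 */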
3309 {
3310 "direct packet access: test16 (arith on data_end)",
3311 .insns = {
3312 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3313 offsetof(struct __sk_buff, data)),
3314 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3315 offsetof(struct __sk_buff, data_end)),
3316 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3319 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3320 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3321 BPF_MOV64_IMM(BPF_REG_0, 0),
3322 BPF_EXIT_INSN(),
3323 },
3324 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
3325 .result = REJECT,
3326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3327 },
3328 {
3329 "direct packet access: test17 (pruning, alignment)",
3330 .insns = {
3331 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3332 offsetof(struct __sk_buff, data)),
3333 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3334 offsetof(struct __sk_buff, data_end)),
3335 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3336 offsetof(struct __sk_buff, mark)),
3337 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3339 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3340 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3341 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3342 BPF_MOV64_IMM(BPF_REG_0, 0),
3343 BPF_EXIT_INSN(),
3344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3345 BPF_JMP_A(-6),
3346 },
3347 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3348 .result = REJECT,
3349 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3350 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3351 },
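/*
 * test17 only matters under strict alignment (the F_LOAD_WITH_STRICT_ALIGNMENT
 * flag above loads the program with BPF_F_STRICT_ALIGNMENT).  The back-edge
 * increments r0 by one, so a later pass through the BPF_W store sits at
 * offset 15 instead of 14 and is no longer 4-byte aligned; the test checks
 * that state pruning does not reuse the earlier, aligned state and that the
 * misaligned access is still reported.
 */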
3352 {
3353 "direct packet access: test18 (imm += pkt_ptr, 1)",
3354 .insns = {
3355 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3356 offsetof(struct __sk_buff, data)),
3357 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3358 offsetof(struct __sk_buff, data_end)),
3359 BPF_MOV64_IMM(BPF_REG_0, 8),
3360 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3361 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3362 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3363 BPF_MOV64_IMM(BPF_REG_0, 0),
3364 BPF_EXIT_INSN(),
3365 },
3366 .result = ACCEPT,
3367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3368 },
3369 {
3370 "direct packet access: test19 (imm += pkt_ptr, 2)",
3371 .insns = {
3372 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3373 offsetof(struct __sk_buff, data)),
3374 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3375 offsetof(struct __sk_buff, data_end)),
3376 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3378 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3379 BPF_MOV64_IMM(BPF_REG_4, 4),
3380 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3381 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3382 BPF_MOV64_IMM(BPF_REG_0, 0),
3383 BPF_EXIT_INSN(),
3384 },
3385 .result = ACCEPT,
3386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3387 },
3388 {
3389 "direct packet access: test20 (x += pkt_ptr, 1)",
3390 .insns = {
3391 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3392 offsetof(struct __sk_buff, data)),
3393 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3394 offsetof(struct __sk_buff, data_end)),
3395 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3396 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3397 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3398 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3399 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3400 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3401 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3403 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3404 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3405 BPF_MOV64_IMM(BPF_REG_0, 0),
3406 BPF_EXIT_INSN(),
3407 },
3408 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3409 .result = ACCEPT,
3410 },
3411 {
3412 "direct packet access: test21 (x += pkt_ptr, 2)",
3413 .insns = {
3414 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3415 offsetof(struct __sk_buff, data)),
3416 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3417 offsetof(struct __sk_buff, data_end)),
3418 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3419 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3420 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3421 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3422 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3423 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3424 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3425 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3426 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3428 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3429 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3430 BPF_MOV64_IMM(BPF_REG_0, 0),
3431 BPF_EXIT_INSN(),
3432 },
3433 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3434 .result = ACCEPT,
3435 },
3436 {
3437 "direct packet access: test22 (x += pkt_ptr, 3)",
3438 .insns = {
3439 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3440 offsetof(struct __sk_buff, data)),
3441 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3442 offsetof(struct __sk_buff, data_end)),
3443 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3445 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3446 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3447 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3448 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3449 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3450 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3451 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3452 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3453 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3454 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3455 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3457 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3458 BPF_MOV64_IMM(BPF_REG_2, 1),
3459 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3460 BPF_MOV64_IMM(BPF_REG_0, 0),
3461 BPF_EXIT_INSN(),
3462 },
3463 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3464 .result = ACCEPT,
3465 },
3466 {
3467 "direct packet access: test23 (x += pkt_ptr, 4)",
3468 .insns = {
3469 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3470 offsetof(struct __sk_buff, data)),
3471 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3472 offsetof(struct __sk_buff, data_end)),
3473 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3474 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3475 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3476 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3477 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3478 BPF_MOV64_IMM(BPF_REG_0, 31),
3479 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3480 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3481 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3483 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3484 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3485 BPF_MOV64_IMM(BPF_REG_0, 0),
3486 BPF_EXIT_INSN(),
3487 },
3488 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3489 .result = REJECT,
3490 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3491 },
3492 {
3493 "direct packet access: test24 (x += pkt_ptr, 5)",
3494 .insns = {
3495 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3496 offsetof(struct __sk_buff, data)),
3497 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3498 offsetof(struct __sk_buff, data_end)),
3499 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3500 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3501 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3502 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3503 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3504 BPF_MOV64_IMM(BPF_REG_0, 64),
3505 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3506 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3507 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3509 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3510 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3511 BPF_MOV64_IMM(BPF_REG_0, 0),
3512 BPF_EXIT_INSN(),
3513 },
3514 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3515 .result = ACCEPT,
3516 },
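/*
 * Tests 18-24 above all add a runtime-unknown scalar to a packet pointer.
 * The shape the verifier accepts (roughly, mirroring test21) is: bound the
 * scalar, add it to the packet pointer, then compare the worst case against
 * data_end before any dereference:
 *
 *	BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),      bound the scalar
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),   pkt + scalar
 *	BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),            keep the access pointer
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),  worst-case end
 *	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),  bail out past data_end
 *
 * test23 is the negative case: no usable range ends up attached to the
 * destination register (r=0 in its errstr), so the 8-byte store is rejected.
 */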
3517 {
3518 "direct packet access: test25 (marking on <, good access)",
3519 .insns = {
3520 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3521 offsetof(struct __sk_buff, data)),
3522 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3523 offsetof(struct __sk_buff, data_end)),
3524 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3526 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3527 BPF_MOV64_IMM(BPF_REG_0, 0),
3528 BPF_EXIT_INSN(),
3529 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3530 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3531 },
3532 .result = ACCEPT,
3533 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3534 },
3535 {
3536 "direct packet access: test26 (marking on <, bad access)",
3537 .insns = {
3538 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3539 offsetof(struct __sk_buff, data)),
3540 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3541 offsetof(struct __sk_buff, data_end)),
3542 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3544 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3545 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3546 BPF_MOV64_IMM(BPF_REG_0, 0),
3547 BPF_EXIT_INSN(),
3548 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3549 },
3550 .result = REJECT,
3551 .errstr = "invalid access to packet",
3552 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3553 },
3554 {
3555 "direct packet access: test27 (marking on <=, good access)",
3556 .insns = {
3557 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3558 offsetof(struct __sk_buff, data)),
3559 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3560 offsetof(struct __sk_buff, data_end)),
3561 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3563 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3564 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3565 BPF_MOV64_IMM(BPF_REG_0, 1),
3566 BPF_EXIT_INSN(),
3567 },
3568 .result = ACCEPT,
3569 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3570 .retval = 1,
3571 },
3572 {
3573 "direct packet access: test28 (marking on <=, bad access)",
3574 .insns = {
3575 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3576 offsetof(struct __sk_buff, data)),
3577 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3578 offsetof(struct __sk_buff, data_end)),
3579 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3581 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3582 BPF_MOV64_IMM(BPF_REG_0, 1),
3583 BPF_EXIT_INSN(),
3584 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3585 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3586 },
3587 .result = REJECT,
3588 .errstr = "invalid access to packet",
3589 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3590 },
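/*
 * Tests 25-28 above check the mirror-image comparisons: for
 * "pkt_ptr + 8 < data_end" the fall-through branch is the unproven one,
 * while for "data_end <= pkt_ptr + 8" it is the taken branch.  The verifier
 * must mark the packet range only on the side where the bound actually
 * holds, for both operand orders.
 */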
3591 {
3592 "helper access to packet: test1, valid packet_ptr range",
3593 .insns = {
3594 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3595 offsetof(struct xdp_md, data)),
3596 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3597 offsetof(struct xdp_md, data_end)),
3598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3600 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3601 BPF_LD_MAP_FD(BPF_REG_1, 0),
3602 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3603 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003604 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3605 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003606 BPF_MOV64_IMM(BPF_REG_0, 0),
3607 BPF_EXIT_INSN(),
3608 },
3609 .fixup_map1 = { 5 },
3610 .result_unpriv = ACCEPT,
3611 .result = ACCEPT,
3612 .prog_type = BPF_PROG_TYPE_XDP,
3613 },
3614 {
3615 "helper access to packet: test2, unchecked packet_ptr",
3616 .insns = {
3617 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3618 offsetof(struct xdp_md, data)),
3619 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3621 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003622 BPF_MOV64_IMM(BPF_REG_0, 0),
3623 BPF_EXIT_INSN(),
3624 },
3625 .fixup_map1 = { 1 },
3626 .result = REJECT,
3627 .errstr = "invalid access to packet",
3628 .prog_type = BPF_PROG_TYPE_XDP,
3629 },
3630 {
3631 "helper access to packet: test3, variable add",
3632 .insns = {
3633 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3634 offsetof(struct xdp_md, data)),
3635 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3636 offsetof(struct xdp_md, data_end)),
3637 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3639 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3640 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3641 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3642 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3643 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3645 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3646 BPF_LD_MAP_FD(BPF_REG_1, 0),
3647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003648 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3649 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003650 BPF_MOV64_IMM(BPF_REG_0, 0),
3651 BPF_EXIT_INSN(),
3652 },
3653 .fixup_map1 = { 11 },
3654 .result = ACCEPT,
3655 .prog_type = BPF_PROG_TYPE_XDP,
3656 },
3657 {
3658 "helper access to packet: test4, packet_ptr with bad range",
3659 .insns = {
3660 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3661 offsetof(struct xdp_md, data)),
3662 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3663 offsetof(struct xdp_md, data_end)),
3664 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3666 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3667 BPF_MOV64_IMM(BPF_REG_0, 0),
3668 BPF_EXIT_INSN(),
3669 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003670 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3671 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003672 BPF_MOV64_IMM(BPF_REG_0, 0),
3673 BPF_EXIT_INSN(),
3674 },
3675 .fixup_map1 = { 7 },
3676 .result = REJECT,
3677 .errstr = "invalid access to packet",
3678 .prog_type = BPF_PROG_TYPE_XDP,
3679 },
3680 {
3681 "helper access to packet: test5, packet_ptr with too short range",
3682 .insns = {
3683 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3684 offsetof(struct xdp_md, data)),
3685 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3686 offsetof(struct xdp_md, data_end)),
3687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3688 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3690 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3691 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3693 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003694 BPF_MOV64_IMM(BPF_REG_0, 0),
3695 BPF_EXIT_INSN(),
3696 },
3697 .fixup_map1 = { 6 },
3698 .result = REJECT,
3699 .errstr = "invalid access to packet",
3700 .prog_type = BPF_PROG_TYPE_XDP,
3701 },
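/*
 * The XDP tests above pass packet pointers straight into
 * bpf_map_lookup_elem()/bpf_map_update_elem().  That is only accepted once a
 * comparison against data_end has proven that the full range the helper will
 * read lies inside the packet; unchecked or too-short ranges fail with
 * "invalid access to packet".
 */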
3702 {
3703 "helper access to packet: test6, cls valid packet_ptr range",
3704 .insns = {
3705 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3706 offsetof(struct __sk_buff, data)),
3707 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3708 offsetof(struct __sk_buff, data_end)),
3709 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3711 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3712 BPF_LD_MAP_FD(BPF_REG_1, 0),
3713 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3714 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003715 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3716 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003717 BPF_MOV64_IMM(BPF_REG_0, 0),
3718 BPF_EXIT_INSN(),
3719 },
3720 .fixup_map1 = { 5 },
3721 .result = ACCEPT,
3722 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3723 },
3724 {
3725 "helper access to packet: test7, cls unchecked packet_ptr",
3726 .insns = {
3727 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3728 offsetof(struct __sk_buff, data)),
3729 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3731 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003732 BPF_MOV64_IMM(BPF_REG_0, 0),
3733 BPF_EXIT_INSN(),
3734 },
3735 .fixup_map1 = { 1 },
3736 .result = REJECT,
3737 .errstr = "invalid access to packet",
3738 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3739 },
3740 {
3741 "helper access to packet: test8, cls variable add",
3742 .insns = {
3743 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3744 offsetof(struct __sk_buff, data)),
3745 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3746 offsetof(struct __sk_buff, data_end)),
3747 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3749 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3750 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3751 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3752 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3753 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3755 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3756 BPF_LD_MAP_FD(BPF_REG_1, 0),
3757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3759 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003760 BPF_MOV64_IMM(BPF_REG_0, 0),
3761 BPF_EXIT_INSN(),
3762 },
3763 .fixup_map1 = { 11 },
3764 .result = ACCEPT,
3765 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3766 },
3767 {
3768 "helper access to packet: test9, cls packet_ptr with bad range",
3769 .insns = {
3770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3771 offsetof(struct __sk_buff, data)),
3772 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3773 offsetof(struct __sk_buff, data_end)),
3774 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3776 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3777 BPF_MOV64_IMM(BPF_REG_0, 0),
3778 BPF_EXIT_INSN(),
3779 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3781 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003782 BPF_MOV64_IMM(BPF_REG_0, 0),
3783 BPF_EXIT_INSN(),
3784 },
3785 .fixup_map1 = { 7 },
3786 .result = REJECT,
3787 .errstr = "invalid access to packet",
3788 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3789 },
3790 {
3791 "helper access to packet: test10, cls packet_ptr with too short range",
3792 .insns = {
3793 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3794 offsetof(struct __sk_buff, data)),
3795 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3796 offsetof(struct __sk_buff, data_end)),
3797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3798 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3800 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3801 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003802 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3803 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003804 BPF_MOV64_IMM(BPF_REG_0, 0),
3805 BPF_EXIT_INSN(),
3806 },
3807 .fixup_map1 = { 6 },
3808 .result = REJECT,
3809 .errstr = "invalid access to packet",
3810 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3811 },
3812 {
3813 "helper access to packet: test11, cls unsuitable helper 1",
3814 .insns = {
3815 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3816 offsetof(struct __sk_buff, data)),
3817 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3818 offsetof(struct __sk_buff, data_end)),
3819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3820 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3822 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3823 BPF_MOV64_IMM(BPF_REG_2, 0),
3824 BPF_MOV64_IMM(BPF_REG_4, 42),
3825 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003826 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3827 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003828 BPF_MOV64_IMM(BPF_REG_0, 0),
3829 BPF_EXIT_INSN(),
3830 },
3831 .result = REJECT,
3832 .errstr = "helper access to the packet",
3833 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3834 },
3835 {
3836 "helper access to packet: test12, cls unsuitable helper 2",
3837 .insns = {
3838 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3839 offsetof(struct __sk_buff, data)),
3840 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3841 offsetof(struct __sk_buff, data_end)),
3842 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3844 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3845 BPF_MOV64_IMM(BPF_REG_2, 0),
3846 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003847 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3848 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003849 BPF_MOV64_IMM(BPF_REG_0, 0),
3850 BPF_EXIT_INSN(),
3851 },
3852 .result = REJECT,
3853 .errstr = "helper access to the packet",
3854 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3855 },
3856 {
3857 "helper access to packet: test13, cls helper ok",
3858 .insns = {
3859 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3860 offsetof(struct __sk_buff, data)),
3861 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3862 offsetof(struct __sk_buff, data_end)),
3863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3864 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3865 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3866 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3868 BPF_MOV64_IMM(BPF_REG_2, 4),
3869 BPF_MOV64_IMM(BPF_REG_3, 0),
3870 BPF_MOV64_IMM(BPF_REG_4, 0),
3871 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003872 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3873 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003874 BPF_MOV64_IMM(BPF_REG_0, 0),
3875 BPF_EXIT_INSN(),
3876 },
3877 .result = ACCEPT,
3878 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3879 },
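/*
 * The SCHED_CLS variants above repeat the same range checks through
 * skb->data and add another failure class: helpers that are not allowed to
 * consume packet pointers at all.  Roughly, bpf_skb_store_bytes() and
 * bpf_skb_load_bytes() work on an skb offset rather than a direct packet
 * pointer, hence "helper access to the packet" in tests 11 and 12, while
 * bpf_csum_diff() may take a verified pkt pointer plus length pair as in
 * test13.
 */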
3880 {
3881 "helper access to packet: test14, cls helper ok sub",
3882 .insns = {
3883 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3884 offsetof(struct __sk_buff, data)),
3885 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3886 offsetof(struct __sk_buff, data_end)),
3887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3888 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3890 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3891 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3892 BPF_MOV64_IMM(BPF_REG_2, 4),
3893 BPF_MOV64_IMM(BPF_REG_3, 0),
3894 BPF_MOV64_IMM(BPF_REG_4, 0),
3895 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3897 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003898 BPF_MOV64_IMM(BPF_REG_0, 0),
3899 BPF_EXIT_INSN(),
3900 },
3901 .result = ACCEPT,
3902 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3903 },
3904 {
3905 "helper access to packet: test15, cls helper fail sub",
3906 .insns = {
3907 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3908 offsetof(struct __sk_buff, data)),
3909 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3910 offsetof(struct __sk_buff, data_end)),
3911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3914 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3915 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3916 BPF_MOV64_IMM(BPF_REG_2, 4),
3917 BPF_MOV64_IMM(BPF_REG_3, 0),
3918 BPF_MOV64_IMM(BPF_REG_4, 0),
3919 BPF_MOV64_IMM(BPF_REG_5, 0),
3920 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3921 BPF_FUNC_csum_diff),
3922 BPF_MOV64_IMM(BPF_REG_0, 0),
3923 BPF_EXIT_INSN(),
3924 },
3925 .result = REJECT,
3926 .errstr = "invalid access to packet",
3927 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3928 },
3929 {
3930 "helper access to packet: test16, cls helper fail range 1",
3931 .insns = {
3932 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3933 offsetof(struct __sk_buff, data)),
3934 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3935 offsetof(struct __sk_buff, data_end)),
3936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3938 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3939 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3941 BPF_MOV64_IMM(BPF_REG_2, 8),
3942 BPF_MOV64_IMM(BPF_REG_3, 0),
3943 BPF_MOV64_IMM(BPF_REG_4, 0),
3944 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3946 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003947 BPF_MOV64_IMM(BPF_REG_0, 0),
3948 BPF_EXIT_INSN(),
3949 },
3950 .result = REJECT,
3951 .errstr = "invalid access to packet",
3952 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3953 },
3954 {
3955 "helper access to packet: test17, cls helper fail range 2",
3956 .insns = {
3957 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3958 offsetof(struct __sk_buff, data)),
3959 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3960 offsetof(struct __sk_buff, data_end)),
3961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3964 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3965 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3966 BPF_MOV64_IMM(BPF_REG_2, -9),
3967 BPF_MOV64_IMM(BPF_REG_3, 0),
3968 BPF_MOV64_IMM(BPF_REG_4, 0),
3969 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003970 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3971 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003972 BPF_MOV64_IMM(BPF_REG_0, 0),
3973 BPF_EXIT_INSN(),
3974 },
3975 .result = REJECT,
3976 .errstr = "R2 min value is negative",
3977 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3978 },
3979 {
3980 "helper access to packet: test18, cls helper fail range 3",
3981 .insns = {
3982 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3983 offsetof(struct __sk_buff, data)),
3984 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3985 offsetof(struct __sk_buff, data_end)),
3986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3987 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3989 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3990 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3991 BPF_MOV64_IMM(BPF_REG_2, ~0),
3992 BPF_MOV64_IMM(BPF_REG_3, 0),
3993 BPF_MOV64_IMM(BPF_REG_4, 0),
3994 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003995 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3996 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003997 BPF_MOV64_IMM(BPF_REG_0, 0),
3998 BPF_EXIT_INSN(),
3999 },
4000 .result = REJECT,
4001 .errstr = "R2 min value is negative",
4002 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4003 },
4004 {
4005 "helper access to packet: test19, cls helper range zero",
4006 .insns = {
4007 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4008 offsetof(struct __sk_buff, data)),
4009 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4010 offsetof(struct __sk_buff, data_end)),
4011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4012 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4014 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4016 BPF_MOV64_IMM(BPF_REG_2, 0),
4017 BPF_MOV64_IMM(BPF_REG_3, 0),
4018 BPF_MOV64_IMM(BPF_REG_4, 0),
4019 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4021 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004022 BPF_MOV64_IMM(BPF_REG_0, 0),
4023 BPF_EXIT_INSN(),
4024 },
4025 .result = ACCEPT,
4026 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4027 },
4028 {
4029 "helper access to packet: test20, pkt end as input",
4030 .insns = {
4031 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4032 offsetof(struct __sk_buff, data)),
4033 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4034 offsetof(struct __sk_buff, data_end)),
4035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4036 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4038 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4039 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4040 BPF_MOV64_IMM(BPF_REG_2, 4),
4041 BPF_MOV64_IMM(BPF_REG_3, 0),
4042 BPF_MOV64_IMM(BPF_REG_4, 0),
4043 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004044 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4045 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004046 BPF_MOV64_IMM(BPF_REG_0, 0),
4047 BPF_EXIT_INSN(),
4048 },
4049 .result = REJECT,
4050 .errstr = "R1 type=pkt_end expected=fp",
4051 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4052 },
4053 {
4054 "helper access to packet: test21, wrong reg",
4055 .insns = {
4056 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4057 offsetof(struct __sk_buff, data)),
4058 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4059 offsetof(struct __sk_buff, data_end)),
4060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4061 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4063 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4064 BPF_MOV64_IMM(BPF_REG_2, 4),
4065 BPF_MOV64_IMM(BPF_REG_3, 0),
4066 BPF_MOV64_IMM(BPF_REG_4, 0),
4067 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004068 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4069 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02004070 BPF_MOV64_IMM(BPF_REG_0, 0),
4071 BPF_EXIT_INSN(),
4072 },
4073 .result = REJECT,
4074 .errstr = "invalid access to packet",
4075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
4076 },
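/*
 * The next block covers bounded access into an array map value (the
 * fixup_map2 map used below has a 48-byte value, per the errstr messages).
 * The shape the verifier wants is: null-check the lookup result, load the
 * index, bound it (from both sides if it can be negative), scale it, and
 * only then add it to the value pointer.  Rough restricted-C sketch, with
 * illustrative names only:
 *
 *	int *val = bpf_map_lookup_elem(&map, &key);
 *	if (!val)
 *		return 0;
 *	unsigned int idx = val[0];
 *	if (idx >= MAX_ENTRIES)
 *		return 0;
 *	val[idx] = 1;
 */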
4077 {
4078 "valid map access into an array with a constant",
4079 .insns = {
4080 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4081 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4083 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004084 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4085 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004086 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004087 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4088 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004089 BPF_EXIT_INSN(),
4090 },
4091 .fixup_map2 = { 3 },
4092 .errstr_unpriv = "R0 leaks addr",
4093 .result_unpriv = REJECT,
4094 .result = ACCEPT,
4095 },
4096 {
4097 "valid map access into an array with a register",
4098 .insns = {
4099 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4100 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4101 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4102 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4104 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4106 BPF_MOV64_IMM(BPF_REG_1, 4),
4107 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4108 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004109 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4110 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004111 BPF_EXIT_INSN(),
4112 },
4113 .fixup_map2 = { 3 },
4114 .errstr_unpriv = "R0 leaks addr",
4115 .result_unpriv = REJECT,
4116 .result = ACCEPT,
4117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4118 },
4119 {
4120 "valid map access into an array with a variable",
4121 .insns = {
4122 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4123 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4125 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004126 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4127 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004128 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4129 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4130 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4131 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4132 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004133 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4134 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004135 BPF_EXIT_INSN(),
4136 },
4137 .fixup_map2 = { 3 },
4138 .errstr_unpriv = "R0 leaks addr",
4139 .result_unpriv = REJECT,
4140 .result = ACCEPT,
4141 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4142 },
4143 {
4144 "valid map access into an array with a signed variable",
4145 .insns = {
4146 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4149 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4151 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4153 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4154 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4155 BPF_MOV32_IMM(BPF_REG_1, 0),
4156 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4157 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4158 BPF_MOV32_IMM(BPF_REG_1, 0),
4159 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4160 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004161 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4162 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004163 BPF_EXIT_INSN(),
4164 },
4165 .fixup_map2 = { 3 },
4166 .errstr_unpriv = "R0 leaks addr",
4167 .result_unpriv = REJECT,
4168 .result = ACCEPT,
4169 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4170 },
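/*
 * The "signed variable" variant above needs both bounds: the JSGT against
 * 0xffffffff (signed -1) handles negative indices and the JSGT of
 * MAX_ENTRIES over the index caps it from above; indices that fail either
 * check are forced to zero, which keeps the final offset inside the 48-byte
 * value.  The later "no floor check" test shows what happens when only the
 * upper bound is present.
 */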
4171 {
4172 "invalid map access into an array with a constant",
4173 .insns = {
4174 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4177 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4179 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4181 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4182 offsetof(struct test_val, foo)),
4183 BPF_EXIT_INSN(),
4184 },
4185 .fixup_map2 = { 3 },
4186 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
4187 .result = REJECT,
4188 },
4189 {
4190 "invalid map access into an array with a register",
4191 .insns = {
4192 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4195 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4197 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004198 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4199 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4200 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4201 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004202 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4203 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004204 BPF_EXIT_INSN(),
4205 },
4206 .fixup_map2 = { 3 },
4207 .errstr = "R0 min value is outside of the array range",
4208 .result = REJECT,
4209 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4210 },
4211 {
4212 "invalid map access into an array with a variable",
4213 .insns = {
4214 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4217 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4219 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004220 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4221 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4222 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4223 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004224 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4225 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004226 BPF_EXIT_INSN(),
4227 },
4228 .fixup_map2 = { 3 },
4229 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4230 .result = REJECT,
4231 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4232 },
4233 {
4234 "invalid map access into an array with no floor check",
4235 .insns = {
4236 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4239 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4241 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01004243 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04004244 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4245 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4246 BPF_MOV32_IMM(BPF_REG_1, 0),
4247 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4248 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004249 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4250 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004251 BPF_EXIT_INSN(),
4252 },
4253 .fixup_map2 = { 3 },
4254 .errstr_unpriv = "R0 leaks addr",
4255 .errstr = "R0 unbounded memory access",
4256 .result_unpriv = REJECT,
4257 .result = REJECT,
4258 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4259 },
4260 {
4261 "invalid map access into an array with a invalid max check",
4262 .insns = {
4263 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4264 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4266 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004267 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4268 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4270 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4271 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4272 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4273 BPF_MOV32_IMM(BPF_REG_1, 0),
4274 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4275 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004276 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4277 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004278 BPF_EXIT_INSN(),
4279 },
4280 .fixup_map2 = { 3 },
4281 .errstr_unpriv = "R0 leaks addr",
4282 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
4283 .result_unpriv = REJECT,
4284 .result = REJECT,
4285 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4286 },
4287 {
4288 "invalid map access into an array with a invalid max check",
4289 .insns = {
4290 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4291 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4293 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4295 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004296 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4297 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4298 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4299 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4300 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4301 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004302 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4303 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04004304 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4305 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02004306 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4307 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04004308 BPF_EXIT_INSN(),
4309 },
4310 .fixup_map2 = { 3, 11 },
4311 .errstr = "R0 pointer += pointer",
4312 .result = REJECT,
4313 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4314 },
4315 {
4316 "multiple registers share map_lookup_elem result",
4317 .insns = {
4318 BPF_MOV64_IMM(BPF_REG_1, 10),
4319 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4320 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4321 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4322 BPF_LD_MAP_FD(BPF_REG_1, 0),
4323 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4324 BPF_FUNC_map_lookup_elem),
4325 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4326 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4327 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4328 BPF_EXIT_INSN(),
4329 },
4330 .fixup_map1 = { 4 },
4331 .result = ACCEPT,
4332 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4333 },
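/*
 * The test above relies on register id tracking: r4 is a copy of the r0
 * returned by bpf_map_lookup_elem(), so the r0 == NULL check also promotes
 * r4 from PTR_TO_MAP_VALUE_OR_NULL to PTR_TO_MAP_VALUE and the store through
 * r4 is accepted.
 */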
4334 {
4335 "alu ops on ptr_to_map_value_or_null, 1",
4336 .insns = {
4337 BPF_MOV64_IMM(BPF_REG_1, 10),
4338 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4339 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4341 BPF_LD_MAP_FD(BPF_REG_1, 0),
4342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4343 BPF_FUNC_map_lookup_elem),
4344 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
4346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
4347 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4348 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4349 BPF_EXIT_INSN(),
4350 },
4351 .fixup_map1 = { 4 },
4352 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4353 .result = REJECT,
4354 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4355 },
4356 {
4357 "alu ops on ptr_to_map_value_or_null, 2",
4358 .insns = {
4359 BPF_MOV64_IMM(BPF_REG_1, 10),
4360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4363 BPF_LD_MAP_FD(BPF_REG_1, 0),
4364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4365 BPF_FUNC_map_lookup_elem),
4366 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4367 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
4368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4369 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4370 BPF_EXIT_INSN(),
4371 },
4372 .fixup_map1 = { 4 },
4373 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4374 .result = REJECT,
4375 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4376 },
4377 {
4378 "alu ops on ptr_to_map_value_or_null, 3",
4379 .insns = {
4380 BPF_MOV64_IMM(BPF_REG_1, 10),
4381 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4384 BPF_LD_MAP_FD(BPF_REG_1, 0),
4385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4386 BPF_FUNC_map_lookup_elem),
4387 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4388 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4389 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4390 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4391 BPF_EXIT_INSN(),
4392 },
4393 .fixup_map1 = { 4 },
4394 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
4395 .result = REJECT,
4396 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4397 },
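/*
 * The three "alu ops" tests above modify the maybe-NULL pointer before the
 * NULL check.  Arithmetic on PTR_TO_MAP_VALUE_OR_NULL is rejected outright:
 * even an add that nets out to zero (variant 1) could otherwise turn a NULL
 * result into a small non-zero value that would then pass the != 0 check
 * while still not pointing at the map value.
 */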
4398 {
4399 "invalid memory access with multiple map_lookup_elem calls",
4400 .insns = {
4401 BPF_MOV64_IMM(BPF_REG_1, 10),
4402 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4403 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4405 BPF_LD_MAP_FD(BPF_REG_1, 0),
4406 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4407 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4409 BPF_FUNC_map_lookup_elem),
4410 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4412 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4414 BPF_FUNC_map_lookup_elem),
4415 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4416 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4417 BPF_EXIT_INSN(),
4418 },
4419 .fixup_map1 = { 4 },
4420 .result = REJECT,
4421 .errstr = "R4 !read_ok",
4422 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4423 },
4424 {
4425 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4426 .insns = {
4427 BPF_MOV64_IMM(BPF_REG_1, 10),
4428 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4429 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4431 BPF_LD_MAP_FD(BPF_REG_1, 0),
4432 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4433 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4435 BPF_FUNC_map_lookup_elem),
4436 BPF_MOV64_IMM(BPF_REG_2, 10),
4437 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4439 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4441 BPF_FUNC_map_lookup_elem),
4442 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4443 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4444 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4445 BPF_EXIT_INSN(),
4446 },
4447 .fixup_map1 = { 4 },
4448 .result = ACCEPT,
4449 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4450 },
4451 {
4452 "invalid map access from else condition",
4453 .insns = {
4454 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4457 BPF_LD_MAP_FD(BPF_REG_1, 0),
4458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4459 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4460 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4461 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4463 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4464 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4465 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4466 BPF_EXIT_INSN(),
4467 },
4468 .fixup_map2 = { 3 },
4469 .errstr = "R0 unbounded memory access",
4470 .result = REJECT,
4471 .errstr_unpriv = "R0 leaks addr",
4472 .result_unpriv = REJECT,
4473 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4474 },
Gianluca Borello3c8397442016-12-03 12:31:33 -08004475 {
4476 "constant register |= constant should keep constant type",
4477 .insns = {
4478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4480 BPF_MOV64_IMM(BPF_REG_2, 34),
4481 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4482 BPF_MOV64_IMM(BPF_REG_3, 0),
4483 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4484 BPF_EXIT_INSN(),
4485 },
4486 .result = ACCEPT,
4487 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4488 },
4489 {
4490 "constant register |= constant should not bypass stack boundary checks",
4491 .insns = {
4492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4494 BPF_MOV64_IMM(BPF_REG_2, 34),
4495 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4496 BPF_MOV64_IMM(BPF_REG_3, 0),
4497 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4498 BPF_EXIT_INSN(),
4499 },
4500 .errstr = "invalid stack type R1 off=-48 access_size=58",
4501 .result = REJECT,
4502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4503 },
4504 {
4505 "constant register |= constant register should keep constant type",
4506 .insns = {
4507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4509 BPF_MOV64_IMM(BPF_REG_2, 34),
4510 BPF_MOV64_IMM(BPF_REG_4, 13),
4511 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4512 BPF_MOV64_IMM(BPF_REG_3, 0),
4513 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4514 BPF_EXIT_INSN(),
4515 },
4516 .result = ACCEPT,
4517 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4518 },
4519 {
4520 "constant register |= constant register should not bypass stack boundary checks",
4521 .insns = {
4522 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4523 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4524 BPF_MOV64_IMM(BPF_REG_2, 34),
4525 BPF_MOV64_IMM(BPF_REG_4, 24),
4526 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4527 BPF_MOV64_IMM(BPF_REG_3, 0),
4528 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4529 BPF_EXIT_INSN(),
4530 },
4531 .errstr = "invalid stack type R1 off=-48 access_size=58",
4532 .result = REJECT,
4533 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4534 },
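	/* Direct packet access for lightweight-tunnel programs: LWT_IN and
	 * LWT_OUT may only read packet data, LWT_XMIT may also write, so
	 * the packet store is rejected for the first two program types and
	 * accepted for XMIT.
	 */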
4535	{
4536 "invalid direct packet write for LWT_IN",
4537 .insns = {
4538 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4539 offsetof(struct __sk_buff, data)),
4540 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4541 offsetof(struct __sk_buff, data_end)),
4542 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4544 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4545 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4546 BPF_MOV64_IMM(BPF_REG_0, 0),
4547 BPF_EXIT_INSN(),
4548 },
4549 .errstr = "cannot write into packet",
4550 .result = REJECT,
4551 .prog_type = BPF_PROG_TYPE_LWT_IN,
4552 },
4553 {
4554 "invalid direct packet write for LWT_OUT",
4555 .insns = {
4556 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4557 offsetof(struct __sk_buff, data)),
4558 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4559 offsetof(struct __sk_buff, data_end)),
4560 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4562 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4563 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4564 BPF_MOV64_IMM(BPF_REG_0, 0),
4565 BPF_EXIT_INSN(),
4566 },
4567 .errstr = "cannot write into packet",
4568 .result = REJECT,
4569 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4570 },
4571 {
4572 "direct packet write for LWT_XMIT",
4573 .insns = {
4574 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4575 offsetof(struct __sk_buff, data)),
4576 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4577 offsetof(struct __sk_buff, data_end)),
4578 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4580 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4581 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4582 BPF_MOV64_IMM(BPF_REG_0, 0),
4583 BPF_EXIT_INSN(),
4584 },
4585 .result = ACCEPT,
4586 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4587 },
4588 {
4589 "direct packet read for LWT_IN",
4590 .insns = {
4591 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4592 offsetof(struct __sk_buff, data)),
4593 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4594 offsetof(struct __sk_buff, data_end)),
4595 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4597 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4598 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4599 BPF_MOV64_IMM(BPF_REG_0, 0),
4600 BPF_EXIT_INSN(),
4601 },
4602 .result = ACCEPT,
4603 .prog_type = BPF_PROG_TYPE_LWT_IN,
4604 },
4605 {
4606 "direct packet read for LWT_OUT",
4607 .insns = {
4608 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4609 offsetof(struct __sk_buff, data)),
4610 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4611 offsetof(struct __sk_buff, data_end)),
4612 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4614 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4615 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4616 BPF_MOV64_IMM(BPF_REG_0, 0),
4617 BPF_EXIT_INSN(),
4618 },
4619 .result = ACCEPT,
4620 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4621 },
4622 {
4623 "direct packet read for LWT_XMIT",
4624 .insns = {
4625 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4626 offsetof(struct __sk_buff, data)),
4627 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4628 offsetof(struct __sk_buff, data_end)),
4629 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4631 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4632 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4633 BPF_MOV64_IMM(BPF_REG_0, 0),
4634 BPF_EXIT_INSN(),
4635 },
4636 .result = ACCEPT,
4637 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4638 },
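	/* Two overlapping data_end checks: the 2-byte load at offset 6 is
	 * already covered by the earlier data + 8 <= data_end test, so the
	 * program is expected to be accepted.
	 */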
4639 {
4640		"overlapping checks for direct packet access",
4641 .insns = {
4642 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4643 offsetof(struct __sk_buff, data)),
4644 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4645 offsetof(struct __sk_buff, data_end)),
4646 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4648 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4649 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4650 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4651 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4652 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4653 BPF_MOV64_IMM(BPF_REG_0, 0),
4654 BPF_EXIT_INSN(),
4655 },
4656 .result = ACCEPT,
4657 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4658 },
4659 {
4660		"invalid access of tc_classid for LWT_IN",
4661 .insns = {
4662 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4663 offsetof(struct __sk_buff, tc_classid)),
4664 BPF_EXIT_INSN(),
4665 },
4666 .result = REJECT,
4667 .errstr = "invalid bpf_context access",
4668 },
4669 {
4670 "invalid access of tc_classid for LWT_OUT",
4671 .insns = {
4672 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4673 offsetof(struct __sk_buff, tc_classid)),
4674 BPF_EXIT_INSN(),
4675 },
4676 .result = REJECT,
4677 .errstr = "invalid bpf_context access",
4678 },
4679 {
4680 "invalid access of tc_classid for LWT_XMIT",
4681 .insns = {
4682 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4683 offsetof(struct __sk_buff, tc_classid)),
4684 BPF_EXIT_INSN(),
4685 },
4686 .result = REJECT,
4687 .errstr = "invalid bpf_context access",
4688 },
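	/* Pointer leak tests: storing a map or stack pointer where user
	 * space could read it back is rejected only for unprivileged
	 * programs, while BPF_XADD on ctx memory is rejected for everyone.
	 */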
4689	{
4690		"leak pointer into ctx 1",
4691 .insns = {
4692 BPF_MOV64_IMM(BPF_REG_0, 0),
4693 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4694 offsetof(struct __sk_buff, cb[0])),
4695 BPF_LD_MAP_FD(BPF_REG_2, 0),
4696 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4697 offsetof(struct __sk_buff, cb[0])),
4698 BPF_EXIT_INSN(),
4699 },
4700 .fixup_map1 = { 2 },
4701 .errstr_unpriv = "R2 leaks addr into mem",
4702 .result_unpriv = REJECT,
4703		.result = REJECT,
4704		.errstr = "BPF_XADD stores into R1 context is not allowed",
4705	},
4706 {
4707 "leak pointer into ctx 2",
4708 .insns = {
4709 BPF_MOV64_IMM(BPF_REG_0, 0),
4710 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4711 offsetof(struct __sk_buff, cb[0])),
4712 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4713 offsetof(struct __sk_buff, cb[0])),
4714 BPF_EXIT_INSN(),
4715 },
4716 .errstr_unpriv = "R10 leaks addr into mem",
4717 .result_unpriv = REJECT,
4718		.result = REJECT,
4719		.errstr = "BPF_XADD stores into R1 context is not allowed",
4720	},
4721 {
4722 "leak pointer into ctx 3",
4723 .insns = {
4724 BPF_MOV64_IMM(BPF_REG_0, 0),
4725 BPF_LD_MAP_FD(BPF_REG_2, 0),
4726 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4727 offsetof(struct __sk_buff, cb[0])),
4728 BPF_EXIT_INSN(),
4729 },
4730 .fixup_map1 = { 1 },
4731 .errstr_unpriv = "R2 leaks addr into ctx",
4732 .result_unpriv = REJECT,
4733 .result = ACCEPT,
4734 },
4735 {
4736 "leak pointer into map val",
4737 .insns = {
4738 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4742 BPF_LD_MAP_FD(BPF_REG_1, 0),
4743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4744 BPF_FUNC_map_lookup_elem),
4745 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4746 BPF_MOV64_IMM(BPF_REG_3, 0),
4747 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4748 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4749 BPF_MOV64_IMM(BPF_REG_0, 0),
4750 BPF_EXIT_INSN(),
4751 },
4752 .fixup_map1 = { 4 },
4753 .errstr_unpriv = "R6 leaks addr into mem",
4754 .result_unpriv = REJECT,
4755 .result = ACCEPT,
4756 },
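	/* Helper argument range checks against a map value: bpf_probe_read()
	 * takes (dst, size, src), so R2 below is the access size.  Full and
	 * partial sizes are fine; zero, negative and out-of-bound sizes
	 * must be rejected.
	 */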
4757 {
4758		"helper access to map: full range",
4759 .insns = {
4760 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4762 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4763 BPF_LD_MAP_FD(BPF_REG_1, 0),
4764 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4765 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4766 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4767 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4768 BPF_MOV64_IMM(BPF_REG_3, 0),
4769 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4770 BPF_EXIT_INSN(),
4771 },
4772 .fixup_map2 = { 3 },
4773 .result = ACCEPT,
4774 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4775 },
4776 {
4777 "helper access to map: partial range",
4778 .insns = {
4779 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4781 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4782 BPF_LD_MAP_FD(BPF_REG_1, 0),
4783 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4785 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4786 BPF_MOV64_IMM(BPF_REG_2, 8),
4787 BPF_MOV64_IMM(BPF_REG_3, 0),
4788 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4789 BPF_EXIT_INSN(),
4790 },
4791 .fixup_map2 = { 3 },
4792 .result = ACCEPT,
4793 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4794 },
4795 {
4796 "helper access to map: empty range",
4797 .insns = {
4798 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4800 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4801 BPF_LD_MAP_FD(BPF_REG_1, 0),
4802 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4803			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4805 BPF_MOV64_IMM(BPF_REG_2, 0),
4806 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4807			BPF_EXIT_INSN(),
4808 },
4809 .fixup_map2 = { 3 },
4810 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4811 .result = REJECT,
4812 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4813 },
4814 {
4815 "helper access to map: out-of-bound range",
4816 .insns = {
4817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4819 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4820 BPF_LD_MAP_FD(BPF_REG_1, 0),
4821 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4824 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4825 BPF_MOV64_IMM(BPF_REG_3, 0),
4826 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4827 BPF_EXIT_INSN(),
4828 },
4829 .fixup_map2 = { 3 },
4830 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4831 .result = REJECT,
4832 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4833 },
4834 {
4835 "helper access to map: negative range",
4836 .insns = {
4837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4840 BPF_LD_MAP_FD(BPF_REG_1, 0),
4841 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4844 BPF_MOV64_IMM(BPF_REG_2, -8),
4845 BPF_MOV64_IMM(BPF_REG_3, 0),
4846 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4847 BPF_EXIT_INSN(),
4848 },
4849 .fixup_map2 = { 3 },
4850		.errstr = "R2 min value is negative",
4851		.result = REJECT,
4852 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4853 },
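	/* Same size checks, but the map value pointer is first advanced by
	 * a constant immediate offset (offsetof(struct test_val, foo)), so
	 * the remaining valid window shrinks accordingly.
	 */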
4854 {
4855 "helper access to adjusted map (via const imm): full range",
4856 .insns = {
4857 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4858 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4859 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4860 BPF_LD_MAP_FD(BPF_REG_1, 0),
4861 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4862 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4865 offsetof(struct test_val, foo)),
4866 BPF_MOV64_IMM(BPF_REG_2,
4867 sizeof(struct test_val) -
4868 offsetof(struct test_val, foo)),
4869 BPF_MOV64_IMM(BPF_REG_3, 0),
4870 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4871 BPF_EXIT_INSN(),
4872 },
4873 .fixup_map2 = { 3 },
4874 .result = ACCEPT,
4875 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4876 },
4877 {
4878 "helper access to adjusted map (via const imm): partial range",
4879 .insns = {
4880 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4881 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4882 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4883 BPF_LD_MAP_FD(BPF_REG_1, 0),
4884 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4888 offsetof(struct test_val, foo)),
4889 BPF_MOV64_IMM(BPF_REG_2, 8),
4890 BPF_MOV64_IMM(BPF_REG_3, 0),
4891 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4892 BPF_EXIT_INSN(),
4893 },
4894 .fixup_map2 = { 3 },
4895 .result = ACCEPT,
4896 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4897 },
4898 {
4899 "helper access to adjusted map (via const imm): empty range",
4900 .insns = {
4901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4903 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4904 BPF_LD_MAP_FD(BPF_REG_1, 0),
4905 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4906			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4907			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4909 offsetof(struct test_val, foo)),
4910			BPF_MOV64_IMM(BPF_REG_2, 0),
4911			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4912			BPF_EXIT_INSN(),
4913 },
4914 .fixup_map2 = { 3 },
4915		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
4916		.result = REJECT,
4917 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4918 },
4919 {
4920 "helper access to adjusted map (via const imm): out-of-bound range",
4921 .insns = {
4922 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4924 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4925 BPF_LD_MAP_FD(BPF_REG_1, 0),
4926 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4927 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4928 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4930 offsetof(struct test_val, foo)),
4931 BPF_MOV64_IMM(BPF_REG_2,
4932 sizeof(struct test_val) -
4933 offsetof(struct test_val, foo) + 8),
4934 BPF_MOV64_IMM(BPF_REG_3, 0),
4935 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4936 BPF_EXIT_INSN(),
4937 },
4938 .fixup_map2 = { 3 },
4939 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4940 .result = REJECT,
4941 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4942 },
4943 {
4944 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4945 .insns = {
4946 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4948 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4949 BPF_LD_MAP_FD(BPF_REG_1, 0),
4950 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4951 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4952 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4954 offsetof(struct test_val, foo)),
4955 BPF_MOV64_IMM(BPF_REG_2, -8),
4956 BPF_MOV64_IMM(BPF_REG_3, 0),
4957 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4958 BPF_EXIT_INSN(),
4959 },
4960 .fixup_map2 = { 3 },
4961		.errstr = "R2 min value is negative",
4962		.result = REJECT,
4963 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4964 },
4965 {
4966 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4967 .insns = {
4968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4973 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4976 offsetof(struct test_val, foo)),
4977 BPF_MOV64_IMM(BPF_REG_2, -1),
4978 BPF_MOV64_IMM(BPF_REG_3, 0),
4979 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4980 BPF_EXIT_INSN(),
4981 },
4982 .fixup_map2 = { 3 },
4983		.errstr = "R2 min value is negative",
4984		.result = REJECT,
4985 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4986 },
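	/* As above, except the constant offset is added through a register,
	 * which the verifier has to track as a known scalar.
	 */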
4987 {
4988 "helper access to adjusted map (via const reg): full range",
4989 .insns = {
4990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4992 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4993 BPF_LD_MAP_FD(BPF_REG_1, 0),
4994 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4996 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4997 BPF_MOV64_IMM(BPF_REG_3,
4998 offsetof(struct test_val, foo)),
4999 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5000 BPF_MOV64_IMM(BPF_REG_2,
5001 sizeof(struct test_val) -
5002 offsetof(struct test_val, foo)),
5003 BPF_MOV64_IMM(BPF_REG_3, 0),
5004 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5005 BPF_EXIT_INSN(),
5006 },
5007 .fixup_map2 = { 3 },
5008 .result = ACCEPT,
5009 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5010 },
5011 {
5012 "helper access to adjusted map (via const reg): partial range",
5013 .insns = {
5014 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5015 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5016 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5017 BPF_LD_MAP_FD(BPF_REG_1, 0),
5018 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5019 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5020 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5021 BPF_MOV64_IMM(BPF_REG_3,
5022 offsetof(struct test_val, foo)),
5023 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5024 BPF_MOV64_IMM(BPF_REG_2, 8),
5025 BPF_MOV64_IMM(BPF_REG_3, 0),
5026 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5027 BPF_EXIT_INSN(),
5028 },
5029 .fixup_map2 = { 3 },
5030 .result = ACCEPT,
5031 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5032 },
5033 {
5034 "helper access to adjusted map (via const reg): empty range",
5035 .insns = {
5036 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5037 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5038 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5039 BPF_LD_MAP_FD(BPF_REG_1, 0),
5040 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5041			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5042			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5043 BPF_MOV64_IMM(BPF_REG_3, 0),
5044 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5045			BPF_MOV64_IMM(BPF_REG_2, 0),
5046			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5047			BPF_EXIT_INSN(),
5048 },
5049 .fixup_map2 = { 3 },
5050		.errstr = "R1 min value is outside of the array range",
5051		.result = REJECT,
5052 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5053 },
5054 {
5055 "helper access to adjusted map (via const reg): out-of-bound range",
5056 .insns = {
5057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5059 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5060 BPF_LD_MAP_FD(BPF_REG_1, 0),
5061 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5063 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5064 BPF_MOV64_IMM(BPF_REG_3,
5065 offsetof(struct test_val, foo)),
5066 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5067 BPF_MOV64_IMM(BPF_REG_2,
5068 sizeof(struct test_val) -
5069 offsetof(struct test_val, foo) + 8),
5070 BPF_MOV64_IMM(BPF_REG_3, 0),
5071 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5072 BPF_EXIT_INSN(),
5073 },
5074 .fixup_map2 = { 3 },
5075 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
5076 .result = REJECT,
5077 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5078 },
5079 {
5080 "helper access to adjusted map (via const reg): negative range (> adjustment)",
5081 .insns = {
5082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5084 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5085 BPF_LD_MAP_FD(BPF_REG_1, 0),
5086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5089 BPF_MOV64_IMM(BPF_REG_3,
5090 offsetof(struct test_val, foo)),
5091 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5092 BPF_MOV64_IMM(BPF_REG_2, -8),
5093 BPF_MOV64_IMM(BPF_REG_3, 0),
5094 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5095 BPF_EXIT_INSN(),
5096 },
5097 .fixup_map2 = { 3 },
5098		.errstr = "R2 min value is negative",
5099		.result = REJECT,
5100 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5101 },
5102 {
5103 "helper access to adjusted map (via const reg): negative range (< adjustment)",
5104 .insns = {
5105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5107 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5108 BPF_LD_MAP_FD(BPF_REG_1, 0),
5109 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5111 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5112 BPF_MOV64_IMM(BPF_REG_3,
5113 offsetof(struct test_val, foo)),
5114 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5115 BPF_MOV64_IMM(BPF_REG_2, -1),
5116 BPF_MOV64_IMM(BPF_REG_3, 0),
5117 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5118 BPF_EXIT_INSN(),
5119 },
5120 .fixup_map2 = { 3 },
5121		.errstr = "R2 min value is negative",
5122		.result = REJECT,
5123 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5124 },
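	/* Here the offset comes from the map value itself, so it is only as
	 * bounded as the preceding conditional jump makes it; missing or
	 * wrong maximum checks must be rejected.
	 */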
5125 {
5126 "helper access to adjusted map (via variable): full range",
5127 .insns = {
5128 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5130 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5131 BPF_LD_MAP_FD(BPF_REG_1, 0),
5132 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5133 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5134 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5135 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5136 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5137 offsetof(struct test_val, foo), 4),
5138 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5139 BPF_MOV64_IMM(BPF_REG_2,
5140 sizeof(struct test_val) -
5141 offsetof(struct test_val, foo)),
5142 BPF_MOV64_IMM(BPF_REG_3, 0),
5143 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5144 BPF_EXIT_INSN(),
5145 },
5146 .fixup_map2 = { 3 },
5147 .result = ACCEPT,
5148 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5149 },
5150 {
5151 "helper access to adjusted map (via variable): partial range",
5152 .insns = {
5153 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5155 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5156 BPF_LD_MAP_FD(BPF_REG_1, 0),
5157 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5158 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5159 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5160 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5161 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5162 offsetof(struct test_val, foo), 4),
5163 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5164 BPF_MOV64_IMM(BPF_REG_2, 8),
5165 BPF_MOV64_IMM(BPF_REG_3, 0),
5166 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5167 BPF_EXIT_INSN(),
5168 },
5169 .fixup_map2 = { 3 },
5170 .result = ACCEPT,
5171 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5172 },
5173 {
5174 "helper access to adjusted map (via variable): empty range",
5175 .insns = {
5176 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5178 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5179 BPF_LD_MAP_FD(BPF_REG_1, 0),
5180 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5181			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5182			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5183 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5184 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5185			offsetof(struct test_val, foo), 3),
5186			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5187			BPF_MOV64_IMM(BPF_REG_2, 0),
5188			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5189			BPF_EXIT_INSN(),
5190 },
5191 .fixup_map2 = { 3 },
5192		.errstr = "R1 min value is outside of the array range",
5193		.result = REJECT,
5194 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5195 },
5196 {
5197 "helper access to adjusted map (via variable): no max check",
5198 .insns = {
5199 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5201 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5202 BPF_LD_MAP_FD(BPF_REG_1, 0),
5203 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5205 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5206 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5207 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5208			BPF_MOV64_IMM(BPF_REG_2, 1),
5209			BPF_MOV64_IMM(BPF_REG_3, 0),
5210 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5211 BPF_EXIT_INSN(),
5212 },
5213 .fixup_map2 = { 3 },
5214		.errstr = "R1 unbounded memory access",
5215		.result = REJECT,
5216 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5217 },
5218 {
5219 "helper access to adjusted map (via variable): wrong max check",
5220 .insns = {
5221 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5223 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5224 BPF_LD_MAP_FD(BPF_REG_1, 0),
5225 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5226 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5228 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5229 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5230 offsetof(struct test_val, foo), 4),
5231 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5232 BPF_MOV64_IMM(BPF_REG_2,
5233 sizeof(struct test_val) -
5234 offsetof(struct test_val, foo) + 1),
5235 BPF_MOV64_IMM(BPF_REG_3, 0),
5236 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5237 BPF_EXIT_INSN(),
5238 },
5239 .fixup_map2 = { 3 },
5240 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
5241 .result = REJECT,
5242 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5243 },
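	/* Bounds checks expressed with the <, <=, s< and s<= jump variants:
	 * the access is only safe on the branch where the index was
	 * actually proven to lie within the map value.
	 */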
5244	{
5245		"helper access to map: bounds check using <, good access",
5246 .insns = {
5247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5249 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5250 BPF_LD_MAP_FD(BPF_REG_1, 0),
5251 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5254 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5255 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
5256 BPF_MOV64_IMM(BPF_REG_0, 0),
5257 BPF_EXIT_INSN(),
5258 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5259 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5260 BPF_MOV64_IMM(BPF_REG_0, 0),
5261 BPF_EXIT_INSN(),
5262 },
5263 .fixup_map2 = { 3 },
5264 .result = ACCEPT,
5265 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5266 },
5267 {
5268 "helper access to map: bounds check using <, bad access",
5269 .insns = {
5270 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5272 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5273 BPF_LD_MAP_FD(BPF_REG_1, 0),
5274 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5275 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5276 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5277 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5278 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
5279 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5280 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5281 BPF_MOV64_IMM(BPF_REG_0, 0),
5282 BPF_EXIT_INSN(),
5283 BPF_MOV64_IMM(BPF_REG_0, 0),
5284 BPF_EXIT_INSN(),
5285 },
5286 .fixup_map2 = { 3 },
5287 .result = REJECT,
5288 .errstr = "R1 unbounded memory access",
5289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5290 },
5291 {
5292 "helper access to map: bounds check using <=, good access",
5293 .insns = {
5294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5296 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5297 BPF_LD_MAP_FD(BPF_REG_1, 0),
5298 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5299 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5300 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5301 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5302 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
5303 BPF_MOV64_IMM(BPF_REG_0, 0),
5304 BPF_EXIT_INSN(),
5305 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5306 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5307 BPF_MOV64_IMM(BPF_REG_0, 0),
5308 BPF_EXIT_INSN(),
5309 },
5310 .fixup_map2 = { 3 },
5311 .result = ACCEPT,
5312 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5313 },
5314 {
5315 "helper access to map: bounds check using <=, bad access",
5316 .insns = {
5317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5319 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5320 BPF_LD_MAP_FD(BPF_REG_1, 0),
5321 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5323 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5324 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5325 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
5326 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5327 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5328 BPF_MOV64_IMM(BPF_REG_0, 0),
5329 BPF_EXIT_INSN(),
5330 BPF_MOV64_IMM(BPF_REG_0, 0),
5331 BPF_EXIT_INSN(),
5332 },
5333 .fixup_map2 = { 3 },
5334 .result = REJECT,
5335 .errstr = "R1 unbounded memory access",
5336 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5337 },
5338 {
5339 "helper access to map: bounds check using s<, good access",
5340 .insns = {
5341 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5343 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5344 BPF_LD_MAP_FD(BPF_REG_1, 0),
5345 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5346 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5348 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5349 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5350 BPF_MOV64_IMM(BPF_REG_0, 0),
5351 BPF_EXIT_INSN(),
5352 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
5353 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5354 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5355 BPF_MOV64_IMM(BPF_REG_0, 0),
5356 BPF_EXIT_INSN(),
5357 },
5358 .fixup_map2 = { 3 },
5359 .result = ACCEPT,
5360 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5361 },
5362 {
5363 "helper access to map: bounds check using s<, good access 2",
5364 .insns = {
5365 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5367 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5368 BPF_LD_MAP_FD(BPF_REG_1, 0),
5369 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5370 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5373 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5374 BPF_MOV64_IMM(BPF_REG_0, 0),
5375 BPF_EXIT_INSN(),
5376 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5377 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5378 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5379 BPF_MOV64_IMM(BPF_REG_0, 0),
5380 BPF_EXIT_INSN(),
5381 },
5382 .fixup_map2 = { 3 },
5383 .result = ACCEPT,
5384 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5385 },
5386 {
5387 "helper access to map: bounds check using s<, bad access",
5388 .insns = {
5389 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5390 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5391 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5392 BPF_LD_MAP_FD(BPF_REG_1, 0),
5393 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5394 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5395 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5396 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5397 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5398 BPF_MOV64_IMM(BPF_REG_0, 0),
5399 BPF_EXIT_INSN(),
5400 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5401 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5402 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5403 BPF_MOV64_IMM(BPF_REG_0, 0),
5404 BPF_EXIT_INSN(),
5405 },
5406 .fixup_map2 = { 3 },
5407 .result = REJECT,
5408 .errstr = "R1 min value is negative",
5409 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5410 },
5411 {
5412 "helper access to map: bounds check using s<=, good access",
5413 .insns = {
5414 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5416 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5417 BPF_LD_MAP_FD(BPF_REG_1, 0),
5418 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5419 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5421 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5422 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5423 BPF_MOV64_IMM(BPF_REG_0, 0),
5424 BPF_EXIT_INSN(),
5425 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5426 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5427 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5428 BPF_MOV64_IMM(BPF_REG_0, 0),
5429 BPF_EXIT_INSN(),
5430 },
5431 .fixup_map2 = { 3 },
5432 .result = ACCEPT,
5433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5434 },
5435 {
5436 "helper access to map: bounds check using s<=, good access 2",
5437 .insns = {
5438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5440 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5441 BPF_LD_MAP_FD(BPF_REG_1, 0),
5442 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5443 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5445 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5446 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5447 BPF_MOV64_IMM(BPF_REG_0, 0),
5448 BPF_EXIT_INSN(),
5449 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5450 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5451 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5452 BPF_MOV64_IMM(BPF_REG_0, 0),
5453 BPF_EXIT_INSN(),
5454 },
5455 .fixup_map2 = { 3 },
5456 .result = ACCEPT,
5457 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5458 },
5459 {
5460 "helper access to map: bounds check using s<=, bad access",
5461 .insns = {
5462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5464 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5465 BPF_LD_MAP_FD(BPF_REG_1, 0),
5466 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5468 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5469 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5470 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5471 BPF_MOV64_IMM(BPF_REG_0, 0),
5472 BPF_EXIT_INSN(),
5473 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5474 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5475 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5476 BPF_MOV64_IMM(BPF_REG_0, 0),
5477 BPF_EXIT_INSN(),
5478 },
5479 .fixup_map2 = { 3 },
5480 .result = REJECT,
5481 .errstr = "R1 min value is negative",
5482 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5483 },
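	/* Spill/fill tracking: a map value pointer, or the "value or NULL"
	 * state right after a lookup, must survive being spilled to the
	 * stack and loaded back.
	 */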
5484 {
5485		"map element value is preserved across register spilling",
5486 .insns = {
5487 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5489 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5490 BPF_LD_MAP_FD(BPF_REG_1, 0),
5491 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5492 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5493 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5496 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5497 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5498 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5499 BPF_EXIT_INSN(),
5500 },
5501 .fixup_map2 = { 3 },
5502 .errstr_unpriv = "R0 leaks addr",
5503 .result = ACCEPT,
5504 .result_unpriv = REJECT,
5505 },
5506 {
5507		"map element value or null is marked on register spilling",
5508 .insns = {
5509 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5511 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5512 BPF_LD_MAP_FD(BPF_REG_1, 0),
5513 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5514 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5516 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5517 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5518 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5519 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5520 BPF_EXIT_INSN(),
5521 },
5522 .fixup_map2 = { 3 },
5523 .errstr_unpriv = "R0 leaks addr",
5524 .result = ACCEPT,
5525 .result_unpriv = REJECT,
5526 },
5527 {
5528 "map element value store of cleared call register",
5529 .insns = {
5530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5532 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5533 BPF_LD_MAP_FD(BPF_REG_1, 0),
5534 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5535 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5536 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5537 BPF_EXIT_INSN(),
5538 },
5539 .fixup_map2 = { 3 },
5540 .errstr_unpriv = "R1 !read_ok",
5541 .errstr = "R1 !read_ok",
5542 .result = REJECT,
5543 .result_unpriv = REJECT,
5544 },
5545 {
5546 "map element value with unaligned store",
5547 .insns = {
5548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5550 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5551 BPF_LD_MAP_FD(BPF_REG_1, 0),
5552 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5555 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5556 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5557 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5558 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5559 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5560 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5561 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5563 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5564 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5565 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5566 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5568 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5569 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5570 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5571 BPF_EXIT_INSN(),
5572 },
5573 .fixup_map2 = { 3 },
5574		.errstr_unpriv = "R0 leaks addr",
5575		.result = ACCEPT,
5576 .result_unpriv = REJECT,
5577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5578 },
5579 {
5580 "map element value with unaligned load",
5581 .insns = {
5582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5584 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5585 BPF_LD_MAP_FD(BPF_REG_1, 0),
5586 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5588 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5589 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5591 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5592 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5593 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5594 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5595 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5597 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5598 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5599 BPF_EXIT_INSN(),
5600 },
5601 .fixup_map2 = { 3 },
5602		.errstr_unpriv = "R0 leaks addr",
5603		.result = ACCEPT,
5604 .result_unpriv = REJECT,
5605 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5606 },
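	/* Illegal ALU ops on a map value pointer: bitwise AND, 32-bit ADD,
	 * division, byte swap and XADD are either rejected outright or turn
	 * the pointer into an unknown scalar that may no longer be
	 * dereferenced.
	 */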
5607 {
5608 "map element value illegal alu op, 1",
5609 .insns = {
5610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5612 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5613 BPF_LD_MAP_FD(BPF_REG_1, 0),
5614 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5616 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5617 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5618 BPF_EXIT_INSN(),
5619 },
5620 .fixup_map2 = { 3 },
5621		.errstr = "R0 bitwise operator &= on pointer",
5622		.result = REJECT,
5623	},
5624 {
5625 "map element value illegal alu op, 2",
5626 .insns = {
5627 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5629 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5630 BPF_LD_MAP_FD(BPF_REG_1, 0),
5631 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5632 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5633 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5634 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5635 BPF_EXIT_INSN(),
5636 },
5637 .fixup_map2 = { 3 },
5638		.errstr = "R0 32-bit pointer arithmetic prohibited",
5639		.result = REJECT,
5640	},
5641 {
5642 "map element value illegal alu op, 3",
5643 .insns = {
5644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5646 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5647 BPF_LD_MAP_FD(BPF_REG_1, 0),
5648 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5650 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5651 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5652 BPF_EXIT_INSN(),
5653 },
5654 .fixup_map2 = { 3 },
5655		.errstr = "R0 pointer arithmetic with /= operator",
5656		.result = REJECT,
5657	},
5658 {
5659 "map element value illegal alu op, 4",
5660 .insns = {
5661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5664 BPF_LD_MAP_FD(BPF_REG_1, 0),
5665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5667 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5669 BPF_EXIT_INSN(),
5670 },
5671 .fixup_map2 = { 3 },
5672 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5673 .errstr = "invalid mem access 'inv'",
5674 .result = REJECT,
5675 .result_unpriv = REJECT,
5676 },
5677 {
5678 "map element value illegal alu op, 5",
5679 .insns = {
5680 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5682 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5683 BPF_LD_MAP_FD(BPF_REG_1, 0),
5684 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5686 BPF_MOV64_IMM(BPF_REG_3, 4096),
5687 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5689 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5690 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5691 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5692 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5693 BPF_EXIT_INSN(),
5694 },
5695 .fixup_map2 = { 3 },
5696		.errstr = "R0 invalid mem access 'inv'",
5697		.result = REJECT,
5698	},
5699 {
5700 "map element value is preserved across register spilling",
5701		.insns = {
5702 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5703 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5704 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5705 BPF_LD_MAP_FD(BPF_REG_1, 0),
5706 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5709 offsetof(struct test_val, foo)),
5710 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5711 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5712 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5713 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5714 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5715 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5716 BPF_EXIT_INSN(),
5717 },
5718 .fixup_map2 = { 3 },
5719		.errstr_unpriv = "R0 leaks addr",
5720		.result = ACCEPT,
5721 .result_unpriv = REJECT,
5722		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5723	},
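	/* Variable-size helper access to the stack: the size register must
	 * be bounded via masking or explicit jumps before bpf_probe_read()
	 * may use it, and the referenced stack area has to be initialized.
	 */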
5724	{
5725 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5726 .insns = {
5727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5729 BPF_MOV64_IMM(BPF_REG_0, 0),
5730 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5731 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5732 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5733 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5734 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5735 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5736 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5737 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5738 BPF_MOV64_IMM(BPF_REG_2, 16),
5739 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5740 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5741 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5742 BPF_MOV64_IMM(BPF_REG_4, 0),
5743 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5744 BPF_MOV64_IMM(BPF_REG_3, 0),
5745 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5746 BPF_MOV64_IMM(BPF_REG_0, 0),
5747 BPF_EXIT_INSN(),
5748 },
5749 .result = ACCEPT,
5750 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5751 },
5752 {
5753 "helper access to variable memory: stack, bitwise AND, zero included",
5754 .insns = {
5755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5757 BPF_MOV64_IMM(BPF_REG_2, 16),
5758 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5759 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5760 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5761 BPF_MOV64_IMM(BPF_REG_3, 0),
5762 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5763 BPF_EXIT_INSN(),
5764 },
5765		.errstr = "invalid indirect read from stack off -64+0 size 64",
5766		.result = REJECT,
5767 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5768 },
5769 {
5770 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5771 .insns = {
5772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5774 BPF_MOV64_IMM(BPF_REG_2, 16),
5775 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5776 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5777 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5778 BPF_MOV64_IMM(BPF_REG_4, 0),
5779 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5780 BPF_MOV64_IMM(BPF_REG_3, 0),
5781 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5782 BPF_MOV64_IMM(BPF_REG_0, 0),
5783 BPF_EXIT_INSN(),
5784 },
5785 .errstr = "invalid stack type R1 off=-64 access_size=65",
5786 .result = REJECT,
5787 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5788 },
5789 {
5790 "helper access to variable memory: stack, JMP, correct bounds",
5791 .insns = {
5792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5794 BPF_MOV64_IMM(BPF_REG_0, 0),
5795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5796 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5797 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5798 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5799 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5800 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5801 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5802 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5803 BPF_MOV64_IMM(BPF_REG_2, 16),
5804 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5805 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5806 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5807 BPF_MOV64_IMM(BPF_REG_4, 0),
5808 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5809 BPF_MOV64_IMM(BPF_REG_3, 0),
5810 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5811 BPF_MOV64_IMM(BPF_REG_0, 0),
5812 BPF_EXIT_INSN(),
5813 },
5814 .result = ACCEPT,
5815 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5816 },
5817 {
5818 "helper access to variable memory: stack, JMP (signed), correct bounds",
5819 .insns = {
5820 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5822 BPF_MOV64_IMM(BPF_REG_0, 0),
5823 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5824 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5825 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5826 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5827 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5828 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5829 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5830 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5831 BPF_MOV64_IMM(BPF_REG_2, 16),
5832 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5833 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5834 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5835 BPF_MOV64_IMM(BPF_REG_4, 0),
5836 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5837 BPF_MOV64_IMM(BPF_REG_3, 0),
5838 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5839 BPF_MOV64_IMM(BPF_REG_0, 0),
5840 BPF_EXIT_INSN(),
5841 },
5842 .result = ACCEPT,
5843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5844 },
5845 {
5846 "helper access to variable memory: stack, JMP, bounds + offset",
5847 .insns = {
5848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5850 BPF_MOV64_IMM(BPF_REG_2, 16),
5851 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5852 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5853 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5854 BPF_MOV64_IMM(BPF_REG_4, 0),
5855 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5857 BPF_MOV64_IMM(BPF_REG_3, 0),
5858 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5859 BPF_MOV64_IMM(BPF_REG_0, 0),
5860 BPF_EXIT_INSN(),
5861 },
5862 .errstr = "invalid stack type R1 off=-64 access_size=65",
5863 .result = REJECT,
5864 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5865 },
5866 {
5867 "helper access to variable memory: stack, JMP, wrong max",
5868 .insns = {
5869 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5871 BPF_MOV64_IMM(BPF_REG_2, 16),
5872 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5873 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5874 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5875 BPF_MOV64_IMM(BPF_REG_4, 0),
5876 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5877 BPF_MOV64_IMM(BPF_REG_3, 0),
5878 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5879 BPF_MOV64_IMM(BPF_REG_0, 0),
5880 BPF_EXIT_INSN(),
5881 },
5882 .errstr = "invalid stack type R1 off=-64 access_size=65",
5883 .result = REJECT,
5884 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5885 },
5886 {
5887 "helper access to variable memory: stack, JMP, no max check",
5888 .insns = {
5889 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5891 BPF_MOV64_IMM(BPF_REG_2, 16),
5892 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5893 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5894 BPF_MOV64_IMM(BPF_REG_4, 0),
5895 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5896 BPF_MOV64_IMM(BPF_REG_3, 0),
5897 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5898 BPF_MOV64_IMM(BPF_REG_0, 0),
5899 BPF_EXIT_INSN(),
5900 },
5901		/* because max wasn't checked, signed min is negative */
5902		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5903		.result = REJECT,
5904 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5905 },
5906 {
5907 "helper access to variable memory: stack, JMP, no min check",
5908 .insns = {
5909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5911 BPF_MOV64_IMM(BPF_REG_2, 16),
5912 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5913 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5914 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5915 BPF_MOV64_IMM(BPF_REG_3, 0),
5916 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5917 BPF_MOV64_IMM(BPF_REG_0, 0),
5918 BPF_EXIT_INSN(),
5919 },
5920		.errstr = "invalid indirect read from stack off -64+0 size 64",
5921		.result = REJECT,
5922 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5923 },
5924 {
5925 "helper access to variable memory: stack, JMP (signed), no min check",
5926 .insns = {
5927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5929 BPF_MOV64_IMM(BPF_REG_2, 16),
5930 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5931 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5932 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5933 BPF_MOV64_IMM(BPF_REG_3, 0),
5934 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5935 BPF_MOV64_IMM(BPF_REG_0, 0),
5936 BPF_EXIT_INSN(),
5937 },
5938 .errstr = "R2 min value is negative",
5939 .result = REJECT,
5940 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5941 },
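	/* Same variable-size checks, with a map value rather than the stack
	 * as the helper's destination buffer.
	 */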
5942 {
5943 "helper access to variable memory: map, JMP, correct bounds",
5944 .insns = {
5945 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5947 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5948 BPF_LD_MAP_FD(BPF_REG_1, 0),
5949 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5950 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5952 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5953 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5954 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5955 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5956 sizeof(struct test_val), 4),
5957 BPF_MOV64_IMM(BPF_REG_4, 0),
5958			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5959			BPF_MOV64_IMM(BPF_REG_3, 0),
5960 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5961 BPF_MOV64_IMM(BPF_REG_0, 0),
5962 BPF_EXIT_INSN(),
5963 },
5964 .fixup_map2 = { 3 },
5965 .result = ACCEPT,
5966 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5967 },
5968 {
5969 "helper access to variable memory: map, JMP, wrong max",
5970 .insns = {
5971 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5973 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5974 BPF_LD_MAP_FD(BPF_REG_1, 0),
5975 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5976 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5978 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5979 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5980 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5981 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5982 sizeof(struct test_val) + 1, 4),
5983 BPF_MOV64_IMM(BPF_REG_4, 0),
5984			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5985			BPF_MOV64_IMM(BPF_REG_3, 0),
5986 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5987 BPF_MOV64_IMM(BPF_REG_0, 0),
5988 BPF_EXIT_INSN(),
5989 },
5990 .fixup_map2 = { 3 },
5991 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5992 .result = REJECT,
5993 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5994 },
5995 {
5996 "helper access to variable memory: map adjusted, JMP, correct bounds",
5997 .insns = {
5998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6000 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6001 BPF_LD_MAP_FD(BPF_REG_1, 0),
6002 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6006 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6007 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6008 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6009 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6010 sizeof(struct test_val) - 20, 4),
6011 BPF_MOV64_IMM(BPF_REG_4, 0),
6012 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6013 BPF_MOV64_IMM(BPF_REG_3, 0),
6014 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6015 BPF_MOV64_IMM(BPF_REG_0, 0),
6016 BPF_EXIT_INSN(),
6017 },
6018 .fixup_map2 = { 3 },
6019 .result = ACCEPT,
6020 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6021 },
6022 {
6023 "helper access to variable memory: map adjusted, JMP, wrong max",
6024 .insns = {
6025 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6027 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6028 BPF_LD_MAP_FD(BPF_REG_1, 0),
6029 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6030 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6031 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
6033 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
6034 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6035 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6036 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
6037 sizeof(struct test_val) - 19, 4),
6038 BPF_MOV64_IMM(BPF_REG_4, 0),
6039 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6040 BPF_MOV64_IMM(BPF_REG_3, 0),
6041 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6042 BPF_MOV64_IMM(BPF_REG_0, 0),
6043 BPF_EXIT_INSN(),
6044 },
6045 .fixup_map2 = { 3 },
6046 .errstr = "R1 min value is outside of the array range",
6047 .result = REJECT,
6048 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6049 },
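	/*
	 * bpf_csum_diff() takes its buffers as ARG_PTR_TO_MEM_OR_NULL, so a
	 * NULL pointer is acceptable only when the matching size is provably
	 * zero; the tests below cover the NULL case as well as non-NULL
	 * stack, map value and packet pointers.
	 */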
6050 {
6051 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6052 .insns = {
6053 BPF_MOV64_IMM(BPF_REG_1, 0),
6054 BPF_MOV64_IMM(BPF_REG_2, 0),
6055 BPF_MOV64_IMM(BPF_REG_3, 0),
6056 BPF_MOV64_IMM(BPF_REG_4, 0),
6057 BPF_MOV64_IMM(BPF_REG_5, 0),
6058 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6059 BPF_EXIT_INSN(),
6060 },
6061 .result = ACCEPT,
6062 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6063 },
6064 {
6065 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
6066 .insns = {
6067 BPF_MOV64_IMM(BPF_REG_1, 0),
6068 BPF_MOV64_IMM(BPF_REG_2, 1),
6069 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6070 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6071 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6072 BPF_MOV64_IMM(BPF_REG_3, 0),
6073 BPF_MOV64_IMM(BPF_REG_4, 0),
6074 BPF_MOV64_IMM(BPF_REG_5, 0),
6075 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6076 BPF_EXIT_INSN(),
6077 },
6078 .errstr = "R1 type=inv expected=fp",
6079 .result = REJECT,
6080 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6081 },
6082 {
6083 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6084 .insns = {
6085 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6087 BPF_MOV64_IMM(BPF_REG_2, 0),
6088 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6089 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
6090 BPF_MOV64_IMM(BPF_REG_3, 0),
6091 BPF_MOV64_IMM(BPF_REG_4, 0),
6092 BPF_MOV64_IMM(BPF_REG_5, 0),
6093 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6094 BPF_EXIT_INSN(),
6095 },
6096 .result = ACCEPT,
6097 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6098 },
6099 {
6100 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6101 .insns = {
6102 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6105 BPF_LD_MAP_FD(BPF_REG_1, 0),
6106 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6107 BPF_FUNC_map_lookup_elem),
6108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6109 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6110 BPF_MOV64_IMM(BPF_REG_2, 0),
6111 BPF_MOV64_IMM(BPF_REG_3, 0),
6112 BPF_MOV64_IMM(BPF_REG_4, 0),
6113 BPF_MOV64_IMM(BPF_REG_5, 0),
6114 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6115 BPF_EXIT_INSN(),
6116 },
6117 .fixup_map1 = { 3 },
6118 .result = ACCEPT,
6119 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6120 },
6121 {
6122 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
6123 .insns = {
6124 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6125 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6127 BPF_LD_MAP_FD(BPF_REG_1, 0),
6128 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6129 BPF_FUNC_map_lookup_elem),
6130 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6131 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6132 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
6133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6135 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
6136 BPF_MOV64_IMM(BPF_REG_3, 0),
6137 BPF_MOV64_IMM(BPF_REG_4, 0),
6138 BPF_MOV64_IMM(BPF_REG_5, 0),
6139 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6140 BPF_EXIT_INSN(),
6141 },
6142 .fixup_map1 = { 3 },
6143 .result = ACCEPT,
6144 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6145 },
6146 {
6147 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
6148 .insns = {
6149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6152 BPF_LD_MAP_FD(BPF_REG_1, 0),
6153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6154 BPF_FUNC_map_lookup_elem),
6155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6156 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6157 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6158 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6159 BPF_MOV64_IMM(BPF_REG_3, 0),
6160 BPF_MOV64_IMM(BPF_REG_4, 0),
6161 BPF_MOV64_IMM(BPF_REG_5, 0),
6162 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6163 BPF_EXIT_INSN(),
6164 },
6165 .fixup_map1 = { 3 },
6166 .result = ACCEPT,
6167 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6168 },
6169 {
6170 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
6171 .insns = {
6172 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6173 offsetof(struct __sk_buff, data)),
6174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6175 offsetof(struct __sk_buff, data_end)),
6176 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
6177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6178 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
6179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
6180 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
6181 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6182 BPF_MOV64_IMM(BPF_REG_3, 0),
6183 BPF_MOV64_IMM(BPF_REG_4, 0),
6184 BPF_MOV64_IMM(BPF_REG_5, 0),
6185 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
6186 BPF_EXIT_INSN(),
6187 },
6188 .result = ACCEPT,
6189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6190 .retval = 0 /* csum_diff of 64-byte packet */,
6191 },
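	/*
	 * bpf_probe_read()'s destination is not _OR_NULL: a NULL pointer is
	 * rejected even with size 0, while size 0 together with a valid
	 * stack or map value pointer is accepted.
	 */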
6192 {
6193 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6194 .insns = {
6195 BPF_MOV64_IMM(BPF_REG_1, 0),
6196 BPF_MOV64_IMM(BPF_REG_2, 0),
6197 BPF_MOV64_IMM(BPF_REG_3, 0),
6198 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6199 BPF_EXIT_INSN(),
6200 },
6201 .errstr = "R1 type=inv expected=fp",
6202 .result = REJECT,
6203 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6204 },
6205 {
6206 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
6207 .insns = {
6208 BPF_MOV64_IMM(BPF_REG_1, 0),
6209 BPF_MOV64_IMM(BPF_REG_2, 1),
6210 BPF_MOV64_IMM(BPF_REG_3, 0),
6211 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6212 BPF_EXIT_INSN(),
6213 },
6214 .errstr = "R1 type=inv expected=fp",
6215 .result = REJECT,
6216 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6217 },
6218 {
6219 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6220 .insns = {
6221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6223 BPF_MOV64_IMM(BPF_REG_2, 0),
6224 BPF_MOV64_IMM(BPF_REG_3, 0),
6225 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6226 BPF_EXIT_INSN(),
6227 },
6228 .result = ACCEPT,
6229 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6230 },
6231 {
6232 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6233 .insns = {
6234 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6237 BPF_LD_MAP_FD(BPF_REG_1, 0),
6238 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6240 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6241 BPF_MOV64_IMM(BPF_REG_2, 0),
6242 BPF_MOV64_IMM(BPF_REG_3, 0),
6243 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6244 BPF_EXIT_INSN(),
6245 },
6246 .fixup_map1 = { 3 },
6247 .result = ACCEPT,
6248 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6249 },
6250 {
6251 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6252 .insns = {
6253 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6254 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6256 BPF_LD_MAP_FD(BPF_REG_1, 0),
6257 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6258 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6259 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6260 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
6261 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
6263 BPF_MOV64_IMM(BPF_REG_3, 0),
6264 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6265 BPF_EXIT_INSN(),
6266 },
6267 .fixup_map1 = { 3 },
6268 .result = ACCEPT,
6269 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6270 },
6271 {
6272 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
6273 .insns = {
6274 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6275 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6277 BPF_LD_MAP_FD(BPF_REG_1, 0),
6278 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6279 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6280 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6281 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6282 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
6283 BPF_MOV64_IMM(BPF_REG_3, 0),
6284 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6285 BPF_EXIT_INSN(),
6286 },
6287 .fixup_map1 = { 3 },
6288 .result = ACCEPT,
6289 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6290 },
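	/*
	 * A variable length that can reach 64 bytes requires the whole
	 * 64-byte stack buffer to be initialized: leaving an 8-byte hole is
	 * rejected as an invalid indirect read, fully initializing the
	 * buffer (next test) is accepted.
	 */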
6291 {
6292 "helper access to variable memory: 8 bytes leak",
6293 .insns = {
6294 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6296 BPF_MOV64_IMM(BPF_REG_0, 0),
6297 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6298 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6299 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6300 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6302 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6304 BPF_MOV64_IMM(BPF_REG_2, 1),
6305 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
6306 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
6307 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
6308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6309 BPF_MOV64_IMM(BPF_REG_3, 0),
6310 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6311 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6312 BPF_EXIT_INSN(),
6313 },
6314 .errstr = "invalid indirect read from stack off -64+32 size 64",
6315 .result = REJECT,
6316 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6317 },
6318 {
6319 "helper access to variable memory: 8 bytes no leak (init memory)",
6320 .insns = {
6321 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6322 BPF_MOV64_IMM(BPF_REG_0, 0),
6323 BPF_MOV64_IMM(BPF_REG_0, 0),
6324 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6325 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6328 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6329 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6330 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6331 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6333 BPF_MOV64_IMM(BPF_REG_2, 0),
6334 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
6335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
6336 BPF_MOV64_IMM(BPF_REG_3, 0),
6337 BPF_EMIT_CALL(BPF_FUNC_probe_read),
6338 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6339 BPF_EXIT_INSN(),
6340 },
6341 .result = ACCEPT,
6342 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
6343 },
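	/*
	 * Two checks on map value index arithmetic: masking the index with a
	 * negative constant and the MOD/AND/RSH sequence below both leave a
	 * maximum value outside the array, so the store must be rejected.
	 */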
6344 {
6345 "invalid and of negative number",
6346 .insns = {
6347 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6348 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6350 BPF_LD_MAP_FD(BPF_REG_1, 0),
6351 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6352 BPF_FUNC_map_lookup_elem),
6353 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6354 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6355 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
6356 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
6357 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6358 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6359 offsetof(struct test_val, foo)),
6360 BPF_EXIT_INSN(),
6361 },
6362 .fixup_map2 = { 3 },
6363 .errstr = "R0 max value is outside of the array range",
6364 .result = REJECT,
6365 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6366 },
6367 {
6368 "invalid range check",
6369 .insns = {
6370 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6371 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6373 BPF_LD_MAP_FD(BPF_REG_1, 0),
6374 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6375 BPF_FUNC_map_lookup_elem),
6376 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6377 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6378 BPF_MOV64_IMM(BPF_REG_9, 1),
6379 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6380 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6381 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6382 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6383 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6384 BPF_MOV32_IMM(BPF_REG_3, 1),
6385 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6386 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6387 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6388 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6389 BPF_MOV64_IMM(BPF_REG_0, 0),
6390 BPF_EXIT_INSN(),
6391 },
6392 .fixup_map2 = { 3 },
6393 .errstr = "R0 max value is outside of the array range",
6394 .result = REJECT,
6395 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6396 },
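	/*
	 * Map-in-map: the value returned by the outer lookup is a pointer to
	 * the inner map and may only be passed to another lookup unmodified
	 * and after a NULL check; offsetting it or skipping the check is
	 * rejected.
	 */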
6397 {
6398 "map in map access",
6399 .insns = {
6400 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6403 BPF_LD_MAP_FD(BPF_REG_1, 0),
6404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6405 BPF_FUNC_map_lookup_elem),
6406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6407 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6410 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6411 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6412 BPF_FUNC_map_lookup_elem),
6413 BPF_MOV64_IMM(BPF_REG_0, 0),
6414 BPF_EXIT_INSN(),
6415 },
6416 .fixup_map_in_map = { 3 },
6417 .result = ACCEPT,
6418 },
6419 {
6420 "invalid inner map pointer",
6421 .insns = {
6422 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6425 BPF_LD_MAP_FD(BPF_REG_1, 0),
6426 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6427 BPF_FUNC_map_lookup_elem),
6428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6429 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6435 BPF_FUNC_map_lookup_elem),
6436 BPF_MOV64_IMM(BPF_REG_0, 0),
6437 BPF_EXIT_INSN(),
6438 },
6439 .fixup_map_in_map = { 3 },
6440 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6441 .result = REJECT,
6442 },
6443 {
6444 "forgot null checking on the inner map pointer",
6445 .insns = {
6446 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6449 BPF_LD_MAP_FD(BPF_REG_1, 0),
6450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6451 BPF_FUNC_map_lookup_elem),
6452 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6453 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6457 BPF_FUNC_map_lookup_elem),
6458 BPF_MOV64_IMM(BPF_REG_0, 0),
6459 BPF_EXIT_INSN(),
6460 },
6461 .fixup_map_in_map = { 3 },
6462 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6463 .result = REJECT,
6464 },
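	/*
	 * BPF_LD_ABS/BPF_LD_IND clobber the caller-saved registers R1-R5, so
	 * reading any of them afterwards must fail with "!read_ok", while
	 * the callee-saved R7 survives; R6 has to hold the skb context.
	 */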
6465 {
6466 "ld_abs: check calling conv, r1",
6467 .insns = {
6468 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6469 BPF_MOV64_IMM(BPF_REG_1, 0),
6470 BPF_LD_ABS(BPF_W, -0x200000),
6471 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6472 BPF_EXIT_INSN(),
6473 },
6474 .errstr = "R1 !read_ok",
6475 .result = REJECT,
6476 },
6477 {
6478 "ld_abs: check calling conv, r2",
6479 .insns = {
6480 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6481 BPF_MOV64_IMM(BPF_REG_2, 0),
6482 BPF_LD_ABS(BPF_W, -0x200000),
6483 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6484 BPF_EXIT_INSN(),
6485 },
6486 .errstr = "R2 !read_ok",
6487 .result = REJECT,
6488 },
6489 {
6490 "ld_abs: check calling conv, r3",
6491 .insns = {
6492 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6493 BPF_MOV64_IMM(BPF_REG_3, 0),
6494 BPF_LD_ABS(BPF_W, -0x200000),
6495 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6496 BPF_EXIT_INSN(),
6497 },
6498 .errstr = "R3 !read_ok",
6499 .result = REJECT,
6500 },
6501 {
6502 "ld_abs: check calling conv, r4",
6503 .insns = {
6504 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6505 BPF_MOV64_IMM(BPF_REG_4, 0),
6506 BPF_LD_ABS(BPF_W, -0x200000),
6507 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6508 BPF_EXIT_INSN(),
6509 },
6510 .errstr = "R4 !read_ok",
6511 .result = REJECT,
6512 },
6513 {
6514 "ld_abs: check calling conv, r5",
6515 .insns = {
6516 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6517 BPF_MOV64_IMM(BPF_REG_5, 0),
6518 BPF_LD_ABS(BPF_W, -0x200000),
6519 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6520 BPF_EXIT_INSN(),
6521 },
6522 .errstr = "R5 !read_ok",
6523 .result = REJECT,
6524 },
6525 {
6526 "ld_abs: check calling conv, r7",
6527 .insns = {
6528 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6529 BPF_MOV64_IMM(BPF_REG_7, 0),
6530 BPF_LD_ABS(BPF_W, -0x200000),
6531 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6532 BPF_EXIT_INSN(),
6533 },
6534 .result = ACCEPT,
6535 },
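	/*
	 * LD_ABS implicitly reads the packet via the skb in R6. The test
	 * below saves the context in R7, calls bpf_skb_vlan_push() (which
	 * changes packet data), restores R6 from R7 and issues further
	 * LD_ABS loads, all of which the verifier has to accept.
	 */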
6536 {
6537 "ld_abs: tests on r6 and skb data reload helper",
6538 .insns = {
6539 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6540 BPF_LD_ABS(BPF_B, 0),
6541 BPF_LD_ABS(BPF_H, 0),
6542 BPF_LD_ABS(BPF_W, 0),
6543 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6544 BPF_MOV64_IMM(BPF_REG_6, 0),
6545 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6546 BPF_MOV64_IMM(BPF_REG_2, 1),
6547 BPF_MOV64_IMM(BPF_REG_3, 2),
6548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6549 BPF_FUNC_skb_vlan_push),
6550 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6551 BPF_LD_ABS(BPF_B, 0),
6552 BPF_LD_ABS(BPF_H, 0),
6553 BPF_LD_ABS(BPF_W, 0),
6554 BPF_MOV64_IMM(BPF_REG_0, 42),
6555 BPF_EXIT_INSN(),
6556 },
6557 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6558 .result = ACCEPT,
6559 .retval = 42 /* ultimate return value */,
6560 },
6561 {
6562 "ld_ind: check calling conv, r1",
6563 .insns = {
6564 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6565 BPF_MOV64_IMM(BPF_REG_1, 1),
6566 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6567 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6568 BPF_EXIT_INSN(),
6569 },
6570 .errstr = "R1 !read_ok",
6571 .result = REJECT,
6572 },
6573 {
6574 "ld_ind: check calling conv, r2",
6575 .insns = {
6576 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6577 BPF_MOV64_IMM(BPF_REG_2, 1),
6578 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6579 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6580 BPF_EXIT_INSN(),
6581 },
6582 .errstr = "R2 !read_ok",
6583 .result = REJECT,
6584 },
6585 {
6586 "ld_ind: check calling conv, r3",
6587 .insns = {
6588 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6589 BPF_MOV64_IMM(BPF_REG_3, 1),
6590 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6591 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6592 BPF_EXIT_INSN(),
6593 },
6594 .errstr = "R3 !read_ok",
6595 .result = REJECT,
6596 },
6597 {
6598 "ld_ind: check calling conv, r4",
6599 .insns = {
6600 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6601 BPF_MOV64_IMM(BPF_REG_4, 1),
6602 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6603 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6604 BPF_EXIT_INSN(),
6605 },
6606 .errstr = "R4 !read_ok",
6607 .result = REJECT,
6608 },
6609 {
6610 "ld_ind: check calling conv, r5",
6611 .insns = {
6612 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6613 BPF_MOV64_IMM(BPF_REG_5, 1),
6614 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6615 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6616 BPF_EXIT_INSN(),
6617 },
6618 .errstr = "R5 !read_ok",
6619 .result = REJECT,
6620 },
6621 {
6622 "ld_ind: check calling conv, r7",
6623 .insns = {
6624 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6625 BPF_MOV64_IMM(BPF_REG_7, 1),
6626 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6627 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6628 BPF_EXIT_INSN(),
6629 },
6630 .result = ACCEPT,
6631 .retval = 1,
6632 },
6633 {
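	/*
	 * Narrow (byte/half/word) loads of the 64-bit sample_period field
	 * are permitted for BPF_PROG_TYPE_PERF_EVENT programs; the offsets
	 * differ between little and big endian, hence the __BYTE_ORDER
	 * conditionals. Narrow loads of __sk_buff fields that do not allow
	 * it must still be rejected as invalid context accesses.
	 */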
6634 "check bpf_perf_event_data->sample_period byte load permitted",
6635 .insns = {
6636 BPF_MOV64_IMM(BPF_REG_0, 0),
6637#if __BYTE_ORDER == __LITTLE_ENDIAN
6638 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6639 offsetof(struct bpf_perf_event_data, sample_period)),
6640#else
6641 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6642 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6643#endif
6644 BPF_EXIT_INSN(),
6645 },
6646 .result = ACCEPT,
6647 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6648 },
6649 {
6650 "check bpf_perf_event_data->sample_period half load permitted",
6651 .insns = {
6652 BPF_MOV64_IMM(BPF_REG_0, 0),
6653#if __BYTE_ORDER == __LITTLE_ENDIAN
6654 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6655 offsetof(struct bpf_perf_event_data, sample_period)),
6656#else
6657 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6658 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6659#endif
6660 BPF_EXIT_INSN(),
6661 },
6662 .result = ACCEPT,
6663 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6664 },
6665 {
6666 "check bpf_perf_event_data->sample_period word load permitted",
6667 .insns = {
6668 BPF_MOV64_IMM(BPF_REG_0, 0),
6669#if __BYTE_ORDER == __LITTLE_ENDIAN
6670 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6671 offsetof(struct bpf_perf_event_data, sample_period)),
6672#else
6673 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6674 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6675#endif
6676 BPF_EXIT_INSN(),
6677 },
6678 .result = ACCEPT,
6679 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6680 },
6681 {
6682 "check bpf_perf_event_data->sample_period dword load permitted",
6683 .insns = {
6684 BPF_MOV64_IMM(BPF_REG_0, 0),
6685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6686 offsetof(struct bpf_perf_event_data, sample_period)),
6687 BPF_EXIT_INSN(),
6688 },
6689 .result = ACCEPT,
6690 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6691 },
6692 {
6693 "check skb->data half load not permitted",
6694 .insns = {
6695 BPF_MOV64_IMM(BPF_REG_0, 0),
6696#if __BYTE_ORDER == __LITTLE_ENDIAN
6697 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6698 offsetof(struct __sk_buff, data)),
6699#else
6700 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6701 offsetof(struct __sk_buff, data) + 2),
6702#endif
6703 BPF_EXIT_INSN(),
6704 },
6705 .result = REJECT,
6706 .errstr = "invalid bpf_context access",
6707 },
6708 {
6709 "check skb->tc_classid half load not permitted for lwt prog",
6710 .insns = {
6711 BPF_MOV64_IMM(BPF_REG_0, 0),
6712#if __BYTE_ORDER == __LITTLE_ENDIAN
6713 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6714 offsetof(struct __sk_buff, tc_classid)),
6715#else
6716 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6717 offsetof(struct __sk_buff, tc_classid) + 2),
6718#endif
6719 BPF_EXIT_INSN(),
6720 },
6721 .result = REJECT,
6722 .errstr = "invalid bpf_context access",
6723 .prog_type = BPF_PROG_TYPE_LWT_IN,
6724 },
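	/*
	 * The "mixing signed and unsigned" variants compare a value loaded
	 * from the stack with both unsigned (JGT/JGE) and signed (JSGT)
	 * conditions before using it as a map value offset. In most variants
	 * the signed minimum stays unbounded and the access is rejected with
	 * "unbounded min value"; variants 4, 7 and 9 bound the value
	 * properly and are accepted.
	 */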
6725 {
6726 "bounds checks mixing signed and unsigned, positive bounds",
6727 .insns = {
6728 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6731 BPF_LD_MAP_FD(BPF_REG_1, 0),
6732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6733 BPF_FUNC_map_lookup_elem),
6734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6735 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6736 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6737 BPF_MOV64_IMM(BPF_REG_2, 2),
6738 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6739 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6740 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6741 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6742 BPF_MOV64_IMM(BPF_REG_0, 0),
6743 BPF_EXIT_INSN(),
6744 },
6745 .fixup_map1 = { 3 },
6746 .errstr = "unbounded min value",
6747 .result = REJECT,
6748 },
6749 {
6750 "bounds checks mixing signed and unsigned",
6751 .insns = {
6752 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6753 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6755 BPF_LD_MAP_FD(BPF_REG_1, 0),
6756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6757 BPF_FUNC_map_lookup_elem),
6758 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6759 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6760 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6761 BPF_MOV64_IMM(BPF_REG_2, -1),
6762 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6763 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6764 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6765 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6766 BPF_MOV64_IMM(BPF_REG_0, 0),
6767 BPF_EXIT_INSN(),
6768 },
6769 .fixup_map1 = { 3 },
6770 .errstr = "unbounded min value",
6771 .result = REJECT,
6772 },
6773 {
6774 "bounds checks mixing signed and unsigned, variant 2",
6775 .insns = {
6776 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6779 BPF_LD_MAP_FD(BPF_REG_1, 0),
6780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6781 BPF_FUNC_map_lookup_elem),
6782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6783 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6784 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6785 BPF_MOV64_IMM(BPF_REG_2, -1),
6786 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6787 BPF_MOV64_IMM(BPF_REG_8, 0),
6788 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6789 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6790 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6791 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6792 BPF_MOV64_IMM(BPF_REG_0, 0),
6793 BPF_EXIT_INSN(),
6794 },
6795 .fixup_map1 = { 3 },
6796 .errstr = "unbounded min value",
6797 .result = REJECT,
6798 },
6799 {
6800 "bounds checks mixing signed and unsigned, variant 3",
6801 .insns = {
6802 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6805 BPF_LD_MAP_FD(BPF_REG_1, 0),
6806 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6807 BPF_FUNC_map_lookup_elem),
6808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6809 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6810 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6811 BPF_MOV64_IMM(BPF_REG_2, -1),
6812 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6813 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6814 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6815 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6816 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6817 BPF_MOV64_IMM(BPF_REG_0, 0),
6818 BPF_EXIT_INSN(),
6819 },
6820 .fixup_map1 = { 3 },
6821 .errstr = "unbounded min value",
6822 .result = REJECT,
6823 },
6824 {
6825 "bounds checks mixing signed and unsigned, variant 4",
6826 .insns = {
6827 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6828 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6830 BPF_LD_MAP_FD(BPF_REG_1, 0),
6831 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6832 BPF_FUNC_map_lookup_elem),
6833 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6834 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6835 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6836 BPF_MOV64_IMM(BPF_REG_2, 1),
6837 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6838 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6839 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6840 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6841 BPF_MOV64_IMM(BPF_REG_0, 0),
6842 BPF_EXIT_INSN(),
6843 },
6844 .fixup_map1 = { 3 },
6845 .result = ACCEPT,
6846 },
6847 {
6848 "bounds checks mixing signed and unsigned, variant 5",
6849 .insns = {
6850 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6851 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6853 BPF_LD_MAP_FD(BPF_REG_1, 0),
6854 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6855 BPF_FUNC_map_lookup_elem),
6856 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6857 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6858 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6859 BPF_MOV64_IMM(BPF_REG_2, -1),
6860 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6861 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6863 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6864 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6865 BPF_MOV64_IMM(BPF_REG_0, 0),
6866 BPF_EXIT_INSN(),
6867 },
6868 .fixup_map1 = { 3 },
6869 .errstr = "unbounded min value",
6870 .result = REJECT,
6871 },
6872 {
6873 "bounds checks mixing signed and unsigned, variant 6",
6874 .insns = {
6875 BPF_MOV64_IMM(BPF_REG_2, 0),
6876 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6878 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6879 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6880 BPF_MOV64_IMM(BPF_REG_6, -1),
6881 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6882 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6883 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6884 BPF_MOV64_IMM(BPF_REG_5, 0),
6885 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6887 BPF_FUNC_skb_load_bytes),
6888 BPF_MOV64_IMM(BPF_REG_0, 0),
6889 BPF_EXIT_INSN(),
6890 },
6891 .errstr = "R4 min value is negative, either use unsigned",
6892 .result = REJECT,
6893 },
6894 {
6895 "bounds checks mixing signed and unsigned, variant 7",
6896 .insns = {
6897 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6900 BPF_LD_MAP_FD(BPF_REG_1, 0),
6901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6902 BPF_FUNC_map_lookup_elem),
6903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6904 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6905 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6906 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6907 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6908 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6909 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6910 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6911 BPF_MOV64_IMM(BPF_REG_0, 0),
6912 BPF_EXIT_INSN(),
6913 },
6914 .fixup_map1 = { 3 },
6915 .result = ACCEPT,
6916 },
6917 {
6918 "bounds checks mixing signed and unsigned, variant 8",
6919 .insns = {
6920 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6923 BPF_LD_MAP_FD(BPF_REG_1, 0),
6924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6925 BPF_FUNC_map_lookup_elem),
6926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6927 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6928 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6929 BPF_MOV64_IMM(BPF_REG_2, -1),
6930 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6931 BPF_MOV64_IMM(BPF_REG_0, 0),
6932 BPF_EXIT_INSN(),
6933 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6935 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6936 BPF_MOV64_IMM(BPF_REG_0, 0),
6937 BPF_EXIT_INSN(),
6938 },
6939 .fixup_map1 = { 3 },
6940 .errstr = "unbounded min value",
6941 .result = REJECT,
6942 },
6943 {
6944 "bounds checks mixing signed and unsigned, variant 9",
6945 .insns = {
6946 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6947 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6949 BPF_LD_MAP_FD(BPF_REG_1, 0),
6950 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6951 BPF_FUNC_map_lookup_elem),
6952 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6953 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6954 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6955 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6956 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6957 BPF_MOV64_IMM(BPF_REG_0, 0),
6958 BPF_EXIT_INSN(),
6959 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6960 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6961 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6962 BPF_MOV64_IMM(BPF_REG_0, 0),
6963 BPF_EXIT_INSN(),
6964 },
6965 .fixup_map1 = { 3 },
6966 .result = ACCEPT,
6967 },
6968 {
6969 "bounds checks mixing signed and unsigned, variant 10",
6970 .insns = {
6971 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6972 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6974 BPF_LD_MAP_FD(BPF_REG_1, 0),
6975 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6976 BPF_FUNC_map_lookup_elem),
6977 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6978 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6979 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6980 BPF_MOV64_IMM(BPF_REG_2, 0),
6981 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6982 BPF_MOV64_IMM(BPF_REG_0, 0),
6983 BPF_EXIT_INSN(),
6984 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6985 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6986 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6987 BPF_MOV64_IMM(BPF_REG_0, 0),
6988 BPF_EXIT_INSN(),
6989 },
6990 .fixup_map1 = { 3 },
6991 .errstr = "unbounded min value",
6992 .result = REJECT,
6993 },
6994 {
6995 "bounds checks mixing signed and unsigned, variant 11",
6996 .insns = {
6997 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7000 BPF_LD_MAP_FD(BPF_REG_1, 0),
7001 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7002 BPF_FUNC_map_lookup_elem),
7003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7004 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7005 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7006 BPF_MOV64_IMM(BPF_REG_2, -1),
7007 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7008 /* Dead branch. */
7009 BPF_MOV64_IMM(BPF_REG_0, 0),
7010 BPF_EXIT_INSN(),
7011 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7012 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7013 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7014 BPF_MOV64_IMM(BPF_REG_0, 0),
7015 BPF_EXIT_INSN(),
7016 },
7017 .fixup_map1 = { 3 },
7018 .errstr = "unbounded min value",
7019 .result = REJECT,
7020 },
7021 {
7022 "bounds checks mixing signed and unsigned, variant 12",
7023 .insns = {
7024 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7025 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7027 BPF_LD_MAP_FD(BPF_REG_1, 0),
7028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7029 BPF_FUNC_map_lookup_elem),
7030 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7031 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7032 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7033 BPF_MOV64_IMM(BPF_REG_2, -6),
7034 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7035 BPF_MOV64_IMM(BPF_REG_0, 0),
7036 BPF_EXIT_INSN(),
7037 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7038 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7039 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7040 BPF_MOV64_IMM(BPF_REG_0, 0),
7041 BPF_EXIT_INSN(),
7042 },
7043 .fixup_map1 = { 3 },
7044 .errstr = "unbounded min value",
7045 .result = REJECT,
7046 },
7047 {
7048 "bounds checks mixing signed and unsigned, variant 13",
7049 .insns = {
7050 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7053 BPF_LD_MAP_FD(BPF_REG_1, 0),
7054 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7055 BPF_FUNC_map_lookup_elem),
7056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7057 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7058 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7059 BPF_MOV64_IMM(BPF_REG_2, 2),
7060 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7061 BPF_MOV64_IMM(BPF_REG_7, 1),
7062 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
7063 BPF_MOV64_IMM(BPF_REG_0, 0),
7064 BPF_EXIT_INSN(),
7065 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
7066 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
7067 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
7068 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7069 BPF_MOV64_IMM(BPF_REG_0, 0),
7070 BPF_EXIT_INSN(),
7071 },
7072 .fixup_map1 = { 3 },
7073 .errstr = "unbounded min value",
7074 .result = REJECT,
7075 },
7076 {
7077 "bounds checks mixing signed and unsigned, variant 14",
7078 .insns = {
7079 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
7080 offsetof(struct __sk_buff, mark)),
7081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7084 BPF_LD_MAP_FD(BPF_REG_1, 0),
7085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7086 BPF_FUNC_map_lookup_elem),
7087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7089 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7090 BPF_MOV64_IMM(BPF_REG_2, -1),
7091 BPF_MOV64_IMM(BPF_REG_8, 2),
7092 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
7093 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
7094 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7095 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7096 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7097 BPF_MOV64_IMM(BPF_REG_0, 0),
7098 BPF_EXIT_INSN(),
7099 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
7100 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
7101 },
7102 .fixup_map1 = { 4 },
7103 .errstr = "R0 invalid mem access 'inv'",
7104 .result = REJECT,
7105 },
7106 {
7107 "bounds checks mixing signed and unsigned, variant 15",
7108 .insns = {
7109 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7112 BPF_LD_MAP_FD(BPF_REG_1, 0),
7113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7114 BPF_FUNC_map_lookup_elem),
7115 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7116 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7118 BPF_MOV64_IMM(BPF_REG_2, -6),
7119 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
7120 BPF_MOV64_IMM(BPF_REG_0, 0),
7121 BPF_EXIT_INSN(),
7122 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7123 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
7124 BPF_MOV64_IMM(BPF_REG_0, 0),
7125 BPF_EXIT_INSN(),
7126 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7127 BPF_MOV64_IMM(BPF_REG_0, 0),
7128 BPF_EXIT_INSN(),
7129 },
7130 .fixup_map1 = { 3 },
7131 .errstr = "unbounded min value",
7132 .result = REJECT,
7133 .result_unpriv = REJECT,
7134 },
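	/*
	 * Subtracting one bounded map value byte from another can still go
	 * negative, so the result must not be used as an offset without
	 * further checks; both variants below are rejected.
	 */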
7135 {
7136 "subtraction bounds (map value) variant 1",
7137 .insns = {
7138 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7141 BPF_LD_MAP_FD(BPF_REG_1, 0),
7142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7143 BPF_FUNC_map_lookup_elem),
7144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7145 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7146 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
7147 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7148 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
7149 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7150 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
7151 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7152 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7153 BPF_EXIT_INSN(),
7154 BPF_MOV64_IMM(BPF_REG_0, 0),
7155 BPF_EXIT_INSN(),
7156 },
7157 .fixup_map1 = { 3 },
7158 .errstr = "R0 max value is outside of the array range",
7159 .result = REJECT,
7160 },
7161 {
7162 "subtraction bounds (map value) variant 2",
7163 .insns = {
7164 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7167 BPF_LD_MAP_FD(BPF_REG_1, 0),
7168 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7169 BPF_FUNC_map_lookup_elem),
7170 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7171 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7172 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
7173 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
7174 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
7175 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
7176 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7177 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7178 BPF_EXIT_INSN(),
7179 BPF_MOV64_IMM(BPF_REG_0, 0),
7180 BPF_EXIT_INSN(),
7181 },
7182 .fixup_map1 = { 3 },
7183 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
7184 .result = REJECT,
7185 },
7186 {
7187 "bounds check based on zero-extended MOV",
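	/*
	 * The remaining bounds tests exercise value tracking across zero and
	 * sign extension, 32-bit truncation, wrapping additions, oversized
	 * shift counts and offsets that overflow 32 bits; inline comments in
	 * several of them spell out the ranges the verifier is expected to
	 * derive.
	 */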
7188 .insns = {
7189 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7192 BPF_LD_MAP_FD(BPF_REG_1, 0),
7193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7194 BPF_FUNC_map_lookup_elem),
7195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7196 /* r2 = 0x0000'0000'ffff'ffff */
7197 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
7198 /* r2 = 0 */
7199 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7200 /* no-op */
7201 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7202 /* access at offset 0 */
7203 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7204 /* exit */
7205 BPF_MOV64_IMM(BPF_REG_0, 0),
7206 BPF_EXIT_INSN(),
7207 },
7208 .fixup_map1 = { 3 },
7209 .result = ACCEPT
7210 },
7211 {
7212 "bounds check based on sign-extended MOV. test1",
7213 .insns = {
7214 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7217 BPF_LD_MAP_FD(BPF_REG_1, 0),
7218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7219 BPF_FUNC_map_lookup_elem),
7220 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7221 /* r2 = 0xffff'ffff'ffff'ffff */
7222 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7223 /* r2 = 0xffff'ffff */
7224 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
7225 /* r0 = <oob pointer> */
7226 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7227 /* access to OOB pointer */
7228 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7229 /* exit */
7230 BPF_MOV64_IMM(BPF_REG_0, 0),
7231 BPF_EXIT_INSN(),
7232 },
7233 .fixup_map1 = { 3 },
7234 .errstr = "map_value pointer and 4294967295",
7235 .result = REJECT
7236 },
7237 {
7238 "bounds check based on sign-extended MOV. test2",
7239 .insns = {
7240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7241 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7243 BPF_LD_MAP_FD(BPF_REG_1, 0),
7244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7245 BPF_FUNC_map_lookup_elem),
7246 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7247 /* r2 = 0xffff'ffff'ffff'ffff */
7248 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
7249 /* r2 = 0xfff'ffff */
7250 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
7251 /* r0 = <oob pointer> */
7252 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
7253 /* access to OOB pointer */
7254 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7255 /* exit */
7256 BPF_MOV64_IMM(BPF_REG_0, 0),
7257 BPF_EXIT_INSN(),
7258 },
7259 .fixup_map1 = { 3 },
7260 .errstr = "R0 min value is outside of the array range",
7261 .result = REJECT
7262 },
7263 {
7264 "bounds check based on reg_off + var_off + insn_off. test1",
7265 .insns = {
7266 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7267 offsetof(struct __sk_buff, mark)),
7268 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7269 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7270 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7271 BPF_LD_MAP_FD(BPF_REG_1, 0),
7272 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7273 BPF_FUNC_map_lookup_elem),
7274 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7275 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
7277 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7279 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7280 BPF_MOV64_IMM(BPF_REG_0, 0),
7281 BPF_EXIT_INSN(),
7282 },
7283 .fixup_map1 = { 4 },
7284 .errstr = "value_size=8 off=1073741825",
7285 .result = REJECT,
7286 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7287 },
7288 {
7289 "bounds check based on reg_off + var_off + insn_off. test2",
7290 .insns = {
7291 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7292 offsetof(struct __sk_buff, mark)),
7293 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7296 BPF_LD_MAP_FD(BPF_REG_1, 0),
7297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7298 BPF_FUNC_map_lookup_elem),
7299 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7300 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
7301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
7302 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
7303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
7304 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
7305 BPF_MOV64_IMM(BPF_REG_0, 0),
7306 BPF_EXIT_INSN(),
7307 },
7308 .fixup_map1 = { 4 },
7309 .errstr = "value 1073741823",
7310 .result = REJECT,
7311 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7312 },
7313 {
7314 "bounds check after truncation of non-boundary-crossing range",
7315 .insns = {
7316 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7319 BPF_LD_MAP_FD(BPF_REG_1, 0),
7320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7321 BPF_FUNC_map_lookup_elem),
7322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7323 /* r1 = [0x00, 0xff] */
7324 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7325 BPF_MOV64_IMM(BPF_REG_2, 1),
7326 /* r2 = 0x10'0000'0000 */
7327 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
7328 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
7329 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7330 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
7331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7332 /* r1 = [0x00, 0xff] */
7333 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
7334 /* r1 = 0 */
7335 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7336 /* no-op */
7337 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7338 /* access at offset 0 */
7339 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7340 /* exit */
7341 BPF_MOV64_IMM(BPF_REG_0, 0),
7342 BPF_EXIT_INSN(),
7343 },
7344 .fixup_map1 = { 3 },
7345 .result = ACCEPT
7346 },
7347 {
7348 "bounds check after truncation of boundary-crossing range (1)",
7349 .insns = {
7350 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7353 BPF_LD_MAP_FD(BPF_REG_1, 0),
7354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7355 BPF_FUNC_map_lookup_elem),
7356 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7357 /* r1 = [0x00, 0xff] */
7358 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7359 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7360 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7361 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7362 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7363 * [0x0000'0000, 0x0000'007f]
7364 */
7365 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
7366 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7367 /* r1 = [0x00, 0xff] or
7368 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7369 */
7370 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7371 /* r1 = 0 or
7372 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7373 */
7374 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7375 /* no-op or OOB pointer computation */
7376 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7377 /* potentially OOB access */
7378 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7379 /* exit */
7380 BPF_MOV64_IMM(BPF_REG_0, 0),
7381 BPF_EXIT_INSN(),
7382 },
7383 .fixup_map1 = { 3 },
7384 /* not actually fully unbounded, but the bound is very high */
7385 .errstr = "R0 unbounded memory access",
7386 .result = REJECT
7387 },
7388 {
7389 "bounds check after truncation of boundary-crossing range (2)",
7390 .insns = {
7391 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7392 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7394 BPF_LD_MAP_FD(BPF_REG_1, 0),
7395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7396 BPF_FUNC_map_lookup_elem),
7397 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7398 /* r1 = [0x00, 0xff] */
7399 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7401 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7403 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7404 * [0x0000'0000, 0x0000'007f]
7405 * difference to previous test: truncation via MOV32
7406 * instead of ALU32.
7407 */
7408 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7409 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7410 /* r1 = [0x00, 0xff] or
7411 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7412 */
7413 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7414 /* r1 = 0 or
7415 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7416 */
7417 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7418 /* no-op or OOB pointer computation */
7419 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7420 /* potentially OOB access */
7421 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7422 /* exit */
7423 BPF_MOV64_IMM(BPF_REG_0, 0),
7424 BPF_EXIT_INSN(),
7425 },
7426 .fixup_map1 = { 3 },
7427 /* not actually fully unbounded, but the bound is very high */
7428 .errstr = "R0 unbounded memory access",
7429 .result = REJECT
7430 },
7431 {
7432 "bounds check after wrapping 32-bit addition",
7433 .insns = {
7434 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7437 BPF_LD_MAP_FD(BPF_REG_1, 0),
7438 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7439 BPF_FUNC_map_lookup_elem),
7440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7441 /* r1 = 0x7fff'ffff */
7442 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7443 /* r1 = 0xffff'fffe */
7444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7445 /* r1 = 0 */
7446 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7447 /* no-op */
7448 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7449 /* access at offset 0 */
7450 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7451 /* exit */
7452 BPF_MOV64_IMM(BPF_REG_0, 0),
7453 BPF_EXIT_INSN(),
7454 },
7455 .fixup_map1 = { 3 },
7456 .result = ACCEPT
7457 },
7458 {
7459 "bounds check after shift with oversized count operand",
7460 .insns = {
7461 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7462 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7464 BPF_LD_MAP_FD(BPF_REG_1, 0),
7465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7466 BPF_FUNC_map_lookup_elem),
7467 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7468 BPF_MOV64_IMM(BPF_REG_2, 32),
7469 BPF_MOV64_IMM(BPF_REG_1, 1),
7470 /* r1 = (u32)1 << (u32)32 = ? */
7471 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7472 /* r1 = [0x0000, 0xffff] */
7473 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7474 /* computes unknown pointer, potentially OOB */
7475 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7476 /* potentially OOB access */
7477 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7478 /* exit */
7479 BPF_MOV64_IMM(BPF_REG_0, 0),
7480 BPF_EXIT_INSN(),
7481 },
7482 .fixup_map1 = { 3 },
7483 .errstr = "R0 max value is outside of the array range",
7484 .result = REJECT
7485 },
7486 {
7487 "bounds check after right shift of maybe-negative number",
7488 .insns = {
7489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7490 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7492 BPF_LD_MAP_FD(BPF_REG_1, 0),
7493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7494 BPF_FUNC_map_lookup_elem),
7495 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7496 /* r1 = [0x00, 0xff] */
7497 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7498 /* r1 = [-0x01, 0xfe] */
7499 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7500 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7501 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7502 /* r1 = 0 or 0xffff'ffff'ffff */
7503 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7504 /* computes unknown pointer, potentially OOB */
7505 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7506 /* potentially OOB access */
7507 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7508 /* exit */
7509 BPF_MOV64_IMM(BPF_REG_0, 0),
7510 BPF_EXIT_INSN(),
7511 },
7512 .fixup_map1 = { 3 },
7513 .errstr = "R0 unbounded memory access",
7514 .result = REJECT
7515 },
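	/* The following four tests make sure that adding large constants to
	 * a map_value pointer is rejected once off+size could overflow a
	 * signed 32-bit value, or the accumulated offset grows beyond the
	 * range the verifier is willing to track, before any memory access
	 * is attempted.
	 */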
7516 {
7517 "bounds check map access with off+size signed 32bit overflow. test1",
7518 .insns = {
7519 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7522 BPF_LD_MAP_FD(BPF_REG_1, 0),
7523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7524 BPF_FUNC_map_lookup_elem),
7525 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7526 BPF_EXIT_INSN(),
7527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7528 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7529 BPF_JMP_A(0),
7530 BPF_EXIT_INSN(),
7531 },
7532 .fixup_map1 = { 3 },
7533 .errstr = "map_value pointer and 2147483646",
7534 .result = REJECT
7535 },
7536 {
7537 "bounds check map access with off+size signed 32bit overflow. test2",
7538 .insns = {
7539 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7540 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7542 BPF_LD_MAP_FD(BPF_REG_1, 0),
7543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7544 BPF_FUNC_map_lookup_elem),
7545 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7546 BPF_EXIT_INSN(),
7547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7550 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7551 BPF_JMP_A(0),
7552 BPF_EXIT_INSN(),
7553 },
7554 .fixup_map1 = { 3 },
7555 .errstr = "pointer offset 1073741822",
7556 .result = REJECT
7557 },
7558 {
7559 "bounds check map access with off+size signed 32bit overflow. test3",
7560 .insns = {
7561 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7562 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7564 BPF_LD_MAP_FD(BPF_REG_1, 0),
7565 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7566 BPF_FUNC_map_lookup_elem),
7567 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7568 BPF_EXIT_INSN(),
7569 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7570 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7572 BPF_JMP_A(0),
7573 BPF_EXIT_INSN(),
7574 },
7575 .fixup_map1 = { 3 },
7576 .errstr = "pointer offset -1073741822",
7577 .result = REJECT
7578 },
7579 {
7580 "bounds check map access with off+size signed 32bit overflow. test4",
7581 .insns = {
7582 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7585 BPF_LD_MAP_FD(BPF_REG_1, 0),
7586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7587 BPF_FUNC_map_lookup_elem),
7588 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7589 BPF_EXIT_INSN(),
7590 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7591 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7592 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7593 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7594 BPF_JMP_A(0),
7595 BPF_EXIT_INSN(),
7596 },
7597 .fixup_map1 = { 3 },
7598 .errstr = "map_value pointer and 1000000000000",
7599 .result = REJECT
7600 },
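	/* The two "pointer/scalar confusion" tests below reach the exit with
	 * R0 holding either a value loaded from the map element (a scalar)
	 * or the frame pointer, depending on the branch taken.  State
	 * pruning must not treat those states as equivalent, and an
	 * unprivileged loader must not be able to leak the pointer as the
	 * return value.
	 */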
7601 {
7602 "pointer/scalar confusion in state equality check (way 1)",
7603 .insns = {
7604 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7605 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7607 BPF_LD_MAP_FD(BPF_REG_1, 0),
7608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7609 BPF_FUNC_map_lookup_elem),
7610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7611 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7612 BPF_JMP_A(1),
7613 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7614 BPF_JMP_A(0),
7615 BPF_EXIT_INSN(),
7616 },
7617 .fixup_map1 = { 3 },
7618 .result = ACCEPT,
7619		.retval = POINTER_VALUE,
7620		.result_unpriv = REJECT,
7621 .errstr_unpriv = "R0 leaks addr as return value"
7622 },
7623 {
7624 "pointer/scalar confusion in state equality check (way 2)",
7625 .insns = {
7626 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7627 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7629 BPF_LD_MAP_FD(BPF_REG_1, 0),
7630 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7631 BPF_FUNC_map_lookup_elem),
7632 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7633 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7634 BPF_JMP_A(1),
7635 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7636 BPF_EXIT_INSN(),
7637 },
7638 .fixup_map1 = { 3 },
7639 .result = ACCEPT,
7640		.retval = POINTER_VALUE,
7641		.result_unpriv = REJECT,
7642 .errstr_unpriv = "R0 leaks addr as return value"
7643 },
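	/* ctx and stack accesses must use a constant offset; even a small,
	 * 4-byte-aligned variable offset is rejected because the verifier
	 * cannot tell which field or slot is being touched.  Roughly (a
	 * sketch only, not part of the test itself) the first test below
	 * corresponds to:
	 *
	 *	int off = skb->len & 4;              (unknown: 0 or 4)
	 *	return *(u32 *)((void *)skb + off);  (rejected)
	 */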
7644 {
7645		"variable-offset ctx access",
7646 .insns = {
7647 /* Get an unknown value */
7648 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7649 /* Make it small and 4-byte aligned */
7650 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7651 /* add it to skb. We now have either &skb->len or
7652 * &skb->pkt_type, but we don't know which
7653 */
7654 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7655 /* dereference it */
7656 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7657 BPF_EXIT_INSN(),
7658 },
7659 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7660 .result = REJECT,
7661 .prog_type = BPF_PROG_TYPE_LWT_IN,
7662 },
7663 {
7664 "variable-offset stack access",
7665 .insns = {
7666 /* Fill the top 8 bytes of the stack */
7667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7668 /* Get an unknown value */
7669 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7670 /* Make it small and 4-byte aligned */
7671 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7672 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7673 /* add it to fp. We now have either fp-4 or fp-8, but
7674 * we don't know which
7675 */
7676 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7677 /* dereference it */
7678 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7679 BPF_EXIT_INSN(),
7680 },
7681 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7682 .result = REJECT,
7683 .prog_type = BPF_PROG_TYPE_LWT_IN,
7684 },
7685	{
7686		"indirect variable-offset stack access",
7687 .insns = {
7688 /* Fill the top 8 bytes of the stack */
7689 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7690 /* Get an unknown value */
7691 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7692 /* Make it small and 4-byte aligned */
7693 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7694 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7695 /* add it to fp. We now have either fp-4 or fp-8, but
7696 * we don't know which
7697 */
7698 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7699 /* dereference it indirectly */
7700 BPF_LD_MAP_FD(BPF_REG_1, 0),
7701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7702 BPF_FUNC_map_lookup_elem),
7703 BPF_MOV64_IMM(BPF_REG_0, 0),
7704 BPF_EXIT_INSN(),
7705 },
7706 .fixup_map1 = { 5 },
7707 .errstr = "variable stack read R2",
7708 .result = REJECT,
7709 .prog_type = BPF_PROG_TYPE_LWT_IN,
7710 },
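	/* The three wraparound tests add huge immediates to the frame
	 * pointer.  The verifier rejects the resulting pointers before the
	 * store, either because the arithmetic itself is deemed unsafe
	 * ("fp pointer and ...") or because the accumulated offset exceeds
	 * the allowed range ("fp pointer offset ...").
	 */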
7711 {
7712 "direct stack access with 32-bit wraparound. test1",
7713 .insns = {
7714 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7717 BPF_MOV32_IMM(BPF_REG_0, 0),
7718 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7719 BPF_EXIT_INSN()
7720 },
7721 .errstr = "fp pointer and 2147483647",
7722 .result = REJECT
7723 },
7724 {
7725 "direct stack access with 32-bit wraparound. test2",
7726 .insns = {
7727 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7730 BPF_MOV32_IMM(BPF_REG_0, 0),
7731 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7732 BPF_EXIT_INSN()
7733 },
7734 .errstr = "fp pointer and 1073741823",
7735 .result = REJECT
7736 },
7737 {
7738 "direct stack access with 32-bit wraparound. test3",
7739 .insns = {
7740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7743 BPF_MOV32_IMM(BPF_REG_0, 0),
7744 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7745 BPF_EXIT_INSN()
7746 },
7747 .errstr = "fp pointer offset 1073741822",
7748 .result = REJECT
7749 },
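	/* Liveness test below: on the path where both conditional jumps are
	 * taken, R0 is never written before the exit.  The verifier must
	 * still flag that path ("R0 !read_ok") even though the fall-through
	 * paths do write R0; those writes have to be screened out when
	 * liveness is propagated during pruning.
	 */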
7750 {
7751		"liveness pruning and write screening",
7752 .insns = {
7753 /* Get an unknown value */
7754 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7755 /* branch conditions teach us nothing about R2 */
7756 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7757 BPF_MOV64_IMM(BPF_REG_0, 0),
7758 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7759 BPF_MOV64_IMM(BPF_REG_0, 0),
7760 BPF_EXIT_INSN(),
7761 },
7762 .errstr = "R0 !read_ok",
7763 .result = REJECT,
7764 .prog_type = BPF_PROG_TYPE_LWT_IN,
7765 },
7766	{
7767 "varlen_map_value_access pruning",
7768 .insns = {
7769 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7770 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7772 BPF_LD_MAP_FD(BPF_REG_1, 0),
7773 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7774 BPF_FUNC_map_lookup_elem),
7775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7776 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7777 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7778 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7779 BPF_MOV32_IMM(BPF_REG_1, 0),
7780 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7781 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7782 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7783 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7784 offsetof(struct test_val, foo)),
7785 BPF_EXIT_INSN(),
7786 },
7787 .fixup_map2 = { 3 },
7788 .errstr_unpriv = "R0 leaks addr",
7789 .errstr = "R0 unbounded memory access",
7790 .result_unpriv = REJECT,
7791 .result = REJECT,
7792 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7793 },
7794	{
7795 "invalid 64-bit BPF_END",
7796 .insns = {
7797 BPF_MOV32_IMM(BPF_REG_0, 0),
7798 {
7799 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7800 .dst_reg = BPF_REG_0,
7801 .src_reg = 0,
7802 .off = 0,
7803 .imm = 32,
7804 },
7805 BPF_EXIT_INSN(),
7806 },
7807		.errstr = "unknown opcode d7",
7808		.result = REJECT,
7809 },
7810	{
7811		"XDP, using ifindex from netdev",
7812 .insns = {
7813 BPF_MOV64_IMM(BPF_REG_0, 0),
7814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7815 offsetof(struct xdp_md, ingress_ifindex)),
7816 BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
7817 BPF_MOV64_IMM(BPF_REG_0, 1),
7818 BPF_EXIT_INSN(),
7819 },
7820 .result = ACCEPT,
7821 .prog_type = BPF_PROG_TYPE_XDP,
7822 .retval = 1,
7823 },
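	/* The "meta access" tests cover the XDP metadata area, which sits
	 * directly in front of the packet: data_meta <= data <= data_end.
	 * Loads through a data_meta pointer must be bounds-checked against
	 * data (checking against data_end is not enough), and helpers such
	 * as bpf_xdp_adjust_meta() invalidate previously derived packet
	 * pointers.  The accepted pattern is roughly (sketch only):
	 *
	 *	void *meta = (void *)(long)ctx->data_meta;
	 *	void *data = (void *)(long)ctx->data;
	 *
	 *	if (meta + 8 > data)
	 *		return XDP_DROP;
	 *	... = *(u8 *)meta;
	 */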
7824 {
7825		"meta access, test1",
7826 .insns = {
7827 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7828 offsetof(struct xdp_md, data_meta)),
7829 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7830 offsetof(struct xdp_md, data)),
7831 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7833 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7834 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7835 BPF_MOV64_IMM(BPF_REG_0, 0),
7836 BPF_EXIT_INSN(),
7837 },
7838 .result = ACCEPT,
7839 .prog_type = BPF_PROG_TYPE_XDP,
7840 },
7841 {
7842 "meta access, test2",
7843 .insns = {
7844 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7845 offsetof(struct xdp_md, data_meta)),
7846 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7847 offsetof(struct xdp_md, data)),
7848 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7849 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7850 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7852 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7853 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7854 BPF_MOV64_IMM(BPF_REG_0, 0),
7855 BPF_EXIT_INSN(),
7856 },
7857 .result = REJECT,
7858 .errstr = "invalid access to packet, off=-8",
7859 .prog_type = BPF_PROG_TYPE_XDP,
7860 },
7861 {
7862 "meta access, test3",
7863 .insns = {
7864 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7865 offsetof(struct xdp_md, data_meta)),
7866 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7867 offsetof(struct xdp_md, data_end)),
7868 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7870 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7871 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7872 BPF_MOV64_IMM(BPF_REG_0, 0),
7873 BPF_EXIT_INSN(),
7874 },
7875 .result = REJECT,
7876 .errstr = "invalid access to packet",
7877 .prog_type = BPF_PROG_TYPE_XDP,
7878 },
7879 {
7880 "meta access, test4",
7881 .insns = {
7882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7883 offsetof(struct xdp_md, data_meta)),
7884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7885 offsetof(struct xdp_md, data_end)),
7886 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7887 offsetof(struct xdp_md, data)),
7888 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7890 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7891 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7892 BPF_MOV64_IMM(BPF_REG_0, 0),
7893 BPF_EXIT_INSN(),
7894 },
7895 .result = REJECT,
7896 .errstr = "invalid access to packet",
7897 .prog_type = BPF_PROG_TYPE_XDP,
7898 },
7899 {
7900 "meta access, test5",
7901 .insns = {
7902 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7903 offsetof(struct xdp_md, data_meta)),
7904 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7905 offsetof(struct xdp_md, data)),
7906 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7908 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7909 BPF_MOV64_IMM(BPF_REG_2, -8),
7910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7911 BPF_FUNC_xdp_adjust_meta),
7912 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7913 BPF_MOV64_IMM(BPF_REG_0, 0),
7914 BPF_EXIT_INSN(),
7915 },
7916 .result = REJECT,
7917 .errstr = "R3 !read_ok",
7918 .prog_type = BPF_PROG_TYPE_XDP,
7919 },
7920 {
7921 "meta access, test6",
7922 .insns = {
7923 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7924 offsetof(struct xdp_md, data_meta)),
7925 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7926 offsetof(struct xdp_md, data)),
7927 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7929 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7931 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7932 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7933 BPF_MOV64_IMM(BPF_REG_0, 0),
7934 BPF_EXIT_INSN(),
7935 },
7936 .result = REJECT,
7937 .errstr = "invalid access to packet",
7938 .prog_type = BPF_PROG_TYPE_XDP,
7939 },
7940 {
7941 "meta access, test7",
7942 .insns = {
7943 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7944 offsetof(struct xdp_md, data_meta)),
7945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7946 offsetof(struct xdp_md, data)),
7947 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7949 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7951 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7952 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7953 BPF_MOV64_IMM(BPF_REG_0, 0),
7954 BPF_EXIT_INSN(),
7955 },
7956 .result = ACCEPT,
7957 .prog_type = BPF_PROG_TYPE_XDP,
7958 },
7959 {
7960 "meta access, test8",
7961 .insns = {
7962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7963 offsetof(struct xdp_md, data_meta)),
7964 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7965 offsetof(struct xdp_md, data)),
7966 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7968 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7969 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7970 BPF_MOV64_IMM(BPF_REG_0, 0),
7971 BPF_EXIT_INSN(),
7972 },
7973 .result = ACCEPT,
7974 .prog_type = BPF_PROG_TYPE_XDP,
7975 },
7976 {
7977 "meta access, test9",
7978 .insns = {
7979 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7980 offsetof(struct xdp_md, data_meta)),
7981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7982 offsetof(struct xdp_md, data)),
7983 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7986 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7987 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7988 BPF_MOV64_IMM(BPF_REG_0, 0),
7989 BPF_EXIT_INSN(),
7990 },
7991 .result = REJECT,
7992 .errstr = "invalid access to packet",
7993 .prog_type = BPF_PROG_TYPE_XDP,
7994 },
7995 {
7996 "meta access, test10",
7997 .insns = {
7998 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7999 offsetof(struct xdp_md, data_meta)),
8000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8001 offsetof(struct xdp_md, data)),
8002 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8003 offsetof(struct xdp_md, data_end)),
8004 BPF_MOV64_IMM(BPF_REG_5, 42),
8005 BPF_MOV64_IMM(BPF_REG_6, 24),
8006 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8007 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8008 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8009 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8010 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
8011 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8012 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8014 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
8015 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
8016 BPF_MOV64_IMM(BPF_REG_0, 0),
8017 BPF_EXIT_INSN(),
8018 },
8019 .result = REJECT,
8020 .errstr = "invalid access to packet",
8021 .prog_type = BPF_PROG_TYPE_XDP,
8022 },
8023 {
8024 "meta access, test11",
8025 .insns = {
8026 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8027 offsetof(struct xdp_md, data_meta)),
8028 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8029 offsetof(struct xdp_md, data)),
8030 BPF_MOV64_IMM(BPF_REG_5, 42),
8031 BPF_MOV64_IMM(BPF_REG_6, 24),
8032 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
8033 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
8034 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
8035 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
8036 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
8037 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8038 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
8039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
8040 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
8041 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
8042 BPF_MOV64_IMM(BPF_REG_0, 0),
8043 BPF_EXIT_INSN(),
8044 },
8045 .result = ACCEPT,
8046 .prog_type = BPF_PROG_TYPE_XDP,
8047 },
8048 {
8049 "meta access, test12",
8050 .insns = {
8051 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8052 offsetof(struct xdp_md, data_meta)),
8053 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8054 offsetof(struct xdp_md, data)),
8055 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8056 offsetof(struct xdp_md, data_end)),
8057 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
8058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8059 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
8060 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8061 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
8062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
8063 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
8064 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8065 BPF_MOV64_IMM(BPF_REG_0, 0),
8066 BPF_EXIT_INSN(),
8067 },
8068 .result = ACCEPT,
8069 .prog_type = BPF_PROG_TYPE_XDP,
8070 },
8071	{
8072		"arithmetic ops make PTR_TO_CTX unusable",
8073 .insns = {
8074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
8075 offsetof(struct __sk_buff, data) -
8076 offsetof(struct __sk_buff, mark)),
8077 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8078 offsetof(struct __sk_buff, mark)),
8079 BPF_EXIT_INSN(),
8080 },
8081 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
8082 .result = REJECT,
8083 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8084 },
8085	{
8086		"pkt_end - pkt_start is allowed",
8087 .insns = {
8088 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8089 offsetof(struct __sk_buff, data_end)),
8090 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8091 offsetof(struct __sk_buff, data)),
8092 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
8093 BPF_EXIT_INSN(),
8094 },
8095 .result = ACCEPT,
8096		.retval = TEST_DATA_LEN,
8097		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8098 },
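	/* The long block below walks the comparison matrix for packet
	 * pointers: pkt_data + 8 is tested against pkt_end with JGT, JLT,
	 * JGE and JLE, in both operand orders.  Only the branch in which
	 * the comparison actually proves the 8 bytes to be inside the
	 * packet may perform the load; variants that read past the verified
	 * range, or read in the wrong branch, must be rejected with
	 * "R1 offset is outside of the packet".  Mangling pkt_end itself
	 * with pointer arithmetic is never allowed.
	 */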
8099 {
8100		"XDP pkt read, pkt_end mangling, bad access 1",
8101 .insns = {
8102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8103 offsetof(struct xdp_md, data)),
8104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8105 offsetof(struct xdp_md, data_end)),
8106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
8109 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8110 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8111 BPF_MOV64_IMM(BPF_REG_0, 0),
8112 BPF_EXIT_INSN(),
8113 },
8114		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8115		.result = REJECT,
8116 .prog_type = BPF_PROG_TYPE_XDP,
8117 },
8118 {
8119 "XDP pkt read, pkt_end mangling, bad access 2",
8120 .insns = {
8121 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8122 offsetof(struct xdp_md, data)),
8123 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8124 offsetof(struct xdp_md, data_end)),
8125 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8126 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8127 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
8128 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8129 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8130 BPF_MOV64_IMM(BPF_REG_0, 0),
8131 BPF_EXIT_INSN(),
8132 },
8133		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
8134		.result = REJECT,
8135 .prog_type = BPF_PROG_TYPE_XDP,
8136 },
8137 {
8138 "XDP pkt read, pkt_data' > pkt_end, good access",
8139 .insns = {
8140 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8141 offsetof(struct xdp_md, data)),
8142 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8143 offsetof(struct xdp_md, data_end)),
8144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8146 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8147 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8148 BPF_MOV64_IMM(BPF_REG_0, 0),
8149 BPF_EXIT_INSN(),
8150 },
8151 .result = ACCEPT,
8152 .prog_type = BPF_PROG_TYPE_XDP,
8153 },
8154 {
8155 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
8156 .insns = {
8157 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8158 offsetof(struct xdp_md, data)),
8159 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8160 offsetof(struct xdp_md, data_end)),
8161 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8163 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8164 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8165 BPF_MOV64_IMM(BPF_REG_0, 0),
8166 BPF_EXIT_INSN(),
8167 },
8168 .errstr = "R1 offset is outside of the packet",
8169 .result = REJECT,
8170 .prog_type = BPF_PROG_TYPE_XDP,
8171 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8172 },
8173 {
8174 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
8175 .insns = {
8176 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8177 offsetof(struct xdp_md, data)),
8178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8179 offsetof(struct xdp_md, data_end)),
8180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8182 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8183 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8184 BPF_MOV64_IMM(BPF_REG_0, 0),
8185 BPF_EXIT_INSN(),
8186 },
8187 .errstr = "R1 offset is outside of the packet",
8188 .result = REJECT,
8189 .prog_type = BPF_PROG_TYPE_XDP,
8190 },
8191 {
8192 "XDP pkt read, pkt_end > pkt_data', good access",
8193 .insns = {
8194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8195 offsetof(struct xdp_md, data)),
8196 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8197 offsetof(struct xdp_md, data_end)),
8198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8200 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8201 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8202 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8203 BPF_MOV64_IMM(BPF_REG_0, 0),
8204 BPF_EXIT_INSN(),
8205 },
8206 .result = ACCEPT,
8207 .prog_type = BPF_PROG_TYPE_XDP,
8208 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8209 },
8210 {
8211 "XDP pkt read, pkt_end > pkt_data', bad access 1",
8212 .insns = {
8213 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8214 offsetof(struct xdp_md, data)),
8215 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8216 offsetof(struct xdp_md, data_end)),
8217 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8219 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8220 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8221 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8222 BPF_MOV64_IMM(BPF_REG_0, 0),
8223 BPF_EXIT_INSN(),
8224 },
8225 .errstr = "R1 offset is outside of the packet",
8226 .result = REJECT,
8227 .prog_type = BPF_PROG_TYPE_XDP,
8228 },
8229 {
8230 "XDP pkt read, pkt_end > pkt_data', bad access 2",
8231 .insns = {
8232 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8233 offsetof(struct xdp_md, data)),
8234 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8235 offsetof(struct xdp_md, data_end)),
8236 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8238 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8239 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8240 BPF_MOV64_IMM(BPF_REG_0, 0),
8241 BPF_EXIT_INSN(),
8242 },
8243 .errstr = "R1 offset is outside of the packet",
8244 .result = REJECT,
8245 .prog_type = BPF_PROG_TYPE_XDP,
8246 },
8247 {
8248 "XDP pkt read, pkt_data' < pkt_end, good access",
8249 .insns = {
8250 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8251 offsetof(struct xdp_md, data)),
8252 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8253 offsetof(struct xdp_md, data_end)),
8254 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8256 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8257 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8259 BPF_MOV64_IMM(BPF_REG_0, 0),
8260 BPF_EXIT_INSN(),
8261 },
8262 .result = ACCEPT,
8263 .prog_type = BPF_PROG_TYPE_XDP,
8264 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8265 },
8266 {
8267 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
8268 .insns = {
8269 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8270 offsetof(struct xdp_md, data)),
8271 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8272 offsetof(struct xdp_md, data_end)),
8273 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8275 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8276 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8277 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8278 BPF_MOV64_IMM(BPF_REG_0, 0),
8279 BPF_EXIT_INSN(),
8280 },
8281 .errstr = "R1 offset is outside of the packet",
8282 .result = REJECT,
8283 .prog_type = BPF_PROG_TYPE_XDP,
8284 },
8285 {
8286 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
8287 .insns = {
8288 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8289 offsetof(struct xdp_md, data)),
8290 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8291 offsetof(struct xdp_md, data_end)),
8292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8294 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8295 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8296 BPF_MOV64_IMM(BPF_REG_0, 0),
8297 BPF_EXIT_INSN(),
8298 },
8299 .errstr = "R1 offset is outside of the packet",
8300 .result = REJECT,
8301 .prog_type = BPF_PROG_TYPE_XDP,
8302 },
8303 {
8304 "XDP pkt read, pkt_end < pkt_data', good access",
8305 .insns = {
8306 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8307 offsetof(struct xdp_md, data)),
8308 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8309 offsetof(struct xdp_md, data_end)),
8310 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8312 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8313 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8314 BPF_MOV64_IMM(BPF_REG_0, 0),
8315 BPF_EXIT_INSN(),
8316 },
8317 .result = ACCEPT,
8318 .prog_type = BPF_PROG_TYPE_XDP,
8319 },
8320 {
8321 "XDP pkt read, pkt_end < pkt_data', bad access 1",
8322 .insns = {
8323 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8324 offsetof(struct xdp_md, data)),
8325 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8326 offsetof(struct xdp_md, data_end)),
8327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8329 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8330 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8331 BPF_MOV64_IMM(BPF_REG_0, 0),
8332 BPF_EXIT_INSN(),
8333 },
8334 .errstr = "R1 offset is outside of the packet",
8335 .result = REJECT,
8336 .prog_type = BPF_PROG_TYPE_XDP,
8337 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8338 },
8339 {
8340 "XDP pkt read, pkt_end < pkt_data', bad access 2",
8341 .insns = {
8342 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8343 offsetof(struct xdp_md, data)),
8344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8345 offsetof(struct xdp_md, data_end)),
8346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8348 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8349 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8350 BPF_MOV64_IMM(BPF_REG_0, 0),
8351 BPF_EXIT_INSN(),
8352 },
8353 .errstr = "R1 offset is outside of the packet",
8354 .result = REJECT,
8355 .prog_type = BPF_PROG_TYPE_XDP,
8356 },
8357 {
8358 "XDP pkt read, pkt_data' >= pkt_end, good access",
8359 .insns = {
8360 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8361 offsetof(struct xdp_md, data)),
8362 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8363 offsetof(struct xdp_md, data_end)),
8364 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8366 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8367 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8368 BPF_MOV64_IMM(BPF_REG_0, 0),
8369 BPF_EXIT_INSN(),
8370 },
8371 .result = ACCEPT,
8372 .prog_type = BPF_PROG_TYPE_XDP,
8373 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8374 },
8375 {
8376 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
8377 .insns = {
8378 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8379 offsetof(struct xdp_md, data)),
8380 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8381 offsetof(struct xdp_md, data_end)),
8382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8384 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8386 BPF_MOV64_IMM(BPF_REG_0, 0),
8387 BPF_EXIT_INSN(),
8388 },
8389 .errstr = "R1 offset is outside of the packet",
8390 .result = REJECT,
8391 .prog_type = BPF_PROG_TYPE_XDP,
8392 },
8393 {
8394 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8395 .insns = {
8396 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8397 offsetof(struct xdp_md, data)),
8398 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8399 offsetof(struct xdp_md, data_end)),
8400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8402 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8404 BPF_MOV64_IMM(BPF_REG_0, 0),
8405 BPF_EXIT_INSN(),
8406 },
8407 .errstr = "R1 offset is outside of the packet",
8408 .result = REJECT,
8409 .prog_type = BPF_PROG_TYPE_XDP,
8410 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8411 },
8412 {
8413 "XDP pkt read, pkt_end >= pkt_data', good access",
8414 .insns = {
8415 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8416 offsetof(struct xdp_md, data)),
8417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8418 offsetof(struct xdp_md, data_end)),
8419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8421 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8422 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8423 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8424 BPF_MOV64_IMM(BPF_REG_0, 0),
8425 BPF_EXIT_INSN(),
8426 },
8427 .result = ACCEPT,
8428 .prog_type = BPF_PROG_TYPE_XDP,
8429 },
8430 {
8431 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8432 .insns = {
8433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8434 offsetof(struct xdp_md, data)),
8435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8436 offsetof(struct xdp_md, data_end)),
8437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8439 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8440 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8441 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8442 BPF_MOV64_IMM(BPF_REG_0, 0),
8443 BPF_EXIT_INSN(),
8444 },
8445 .errstr = "R1 offset is outside of the packet",
8446 .result = REJECT,
8447 .prog_type = BPF_PROG_TYPE_XDP,
8448 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8449 },
8450 {
8451 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8452 .insns = {
8453 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8454 offsetof(struct xdp_md, data)),
8455 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8456 offsetof(struct xdp_md, data_end)),
8457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8459 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8460 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8461 BPF_MOV64_IMM(BPF_REG_0, 0),
8462 BPF_EXIT_INSN(),
8463 },
8464 .errstr = "R1 offset is outside of the packet",
8465 .result = REJECT,
8466 .prog_type = BPF_PROG_TYPE_XDP,
8467 },
8468 {
8469 "XDP pkt read, pkt_data' <= pkt_end, good access",
8470 .insns = {
8471 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8472 offsetof(struct xdp_md, data)),
8473 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8474 offsetof(struct xdp_md, data_end)),
8475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8477 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8478 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8479 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8480 BPF_MOV64_IMM(BPF_REG_0, 0),
8481 BPF_EXIT_INSN(),
8482 },
8483 .result = ACCEPT,
8484 .prog_type = BPF_PROG_TYPE_XDP,
8485 },
8486 {
8487 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8488 .insns = {
8489 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8490 offsetof(struct xdp_md, data)),
8491 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8492 offsetof(struct xdp_md, data_end)),
8493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8495 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8496 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8497 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8498 BPF_MOV64_IMM(BPF_REG_0, 0),
8499 BPF_EXIT_INSN(),
8500 },
8501 .errstr = "R1 offset is outside of the packet",
8502 .result = REJECT,
8503 .prog_type = BPF_PROG_TYPE_XDP,
8504 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8505 },
8506 {
8507 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8508 .insns = {
8509 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8510 offsetof(struct xdp_md, data)),
8511 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8512 offsetof(struct xdp_md, data_end)),
8513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8515 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8516 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8517 BPF_MOV64_IMM(BPF_REG_0, 0),
8518 BPF_EXIT_INSN(),
8519 },
8520 .errstr = "R1 offset is outside of the packet",
8521 .result = REJECT,
8522 .prog_type = BPF_PROG_TYPE_XDP,
8523 },
8524 {
8525 "XDP pkt read, pkt_end <= pkt_data', good access",
8526 .insns = {
8527 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8528 offsetof(struct xdp_md, data)),
8529 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8530 offsetof(struct xdp_md, data_end)),
8531 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8533 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8535 BPF_MOV64_IMM(BPF_REG_0, 0),
8536 BPF_EXIT_INSN(),
8537 },
8538 .result = ACCEPT,
8539 .prog_type = BPF_PROG_TYPE_XDP,
8540 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8541 },
8542 {
8543 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8544 .insns = {
8545 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8546 offsetof(struct xdp_md, data)),
8547 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8548 offsetof(struct xdp_md, data_end)),
8549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8551 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8553 BPF_MOV64_IMM(BPF_REG_0, 0),
8554 BPF_EXIT_INSN(),
8555 },
8556 .errstr = "R1 offset is outside of the packet",
8557 .result = REJECT,
8558 .prog_type = BPF_PROG_TYPE_XDP,
8559 },
8560 {
8561 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8562 .insns = {
8563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8564 offsetof(struct xdp_md, data)),
8565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8566 offsetof(struct xdp_md, data_end)),
8567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8569 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8570 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8571 BPF_MOV64_IMM(BPF_REG_0, 0),
8572 BPF_EXIT_INSN(),
8573 },
8574 .errstr = "R1 offset is outside of the packet",
8575 .result = REJECT,
8576 .prog_type = BPF_PROG_TYPE_XDP,
8577 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8578 },
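	/* Same comparison matrix as above, now between pkt_meta' and
	 * pkt_data: the metadata pointer plus 8 must be proven not to reach
	 * past data before the load is allowed.
	 */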
8579	{
8580		"XDP pkt read, pkt_meta' > pkt_data, good access",
8581 .insns = {
8582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8583 offsetof(struct xdp_md, data_meta)),
8584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8585 offsetof(struct xdp_md, data)),
8586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8588 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8590 BPF_MOV64_IMM(BPF_REG_0, 0),
8591 BPF_EXIT_INSN(),
8592 },
8593 .result = ACCEPT,
8594 .prog_type = BPF_PROG_TYPE_XDP,
8595 },
8596 {
8597 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8598 .insns = {
8599 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8600 offsetof(struct xdp_md, data_meta)),
8601 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8602 offsetof(struct xdp_md, data)),
8603 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8604 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8605 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8606 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8607 BPF_MOV64_IMM(BPF_REG_0, 0),
8608 BPF_EXIT_INSN(),
8609 },
8610 .errstr = "R1 offset is outside of the packet",
8611 .result = REJECT,
8612 .prog_type = BPF_PROG_TYPE_XDP,
8613 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8614 },
8615 {
8616 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8617 .insns = {
8618 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8619 offsetof(struct xdp_md, data_meta)),
8620 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8621 offsetof(struct xdp_md, data)),
8622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8624 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8625 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8626 BPF_MOV64_IMM(BPF_REG_0, 0),
8627 BPF_EXIT_INSN(),
8628 },
8629 .errstr = "R1 offset is outside of the packet",
8630 .result = REJECT,
8631 .prog_type = BPF_PROG_TYPE_XDP,
8632 },
8633 {
8634 "XDP pkt read, pkt_data > pkt_meta', good access",
8635 .insns = {
8636 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8637 offsetof(struct xdp_md, data_meta)),
8638 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8639 offsetof(struct xdp_md, data)),
8640 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8642 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8643 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8644 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8645 BPF_MOV64_IMM(BPF_REG_0, 0),
8646 BPF_EXIT_INSN(),
8647 },
8648 .result = ACCEPT,
8649 .prog_type = BPF_PROG_TYPE_XDP,
8650 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8651 },
8652 {
8653 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8654 .insns = {
8655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8656 offsetof(struct xdp_md, data_meta)),
8657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8658 offsetof(struct xdp_md, data)),
8659 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8661 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8662 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8663 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8664 BPF_MOV64_IMM(BPF_REG_0, 0),
8665 BPF_EXIT_INSN(),
8666 },
8667 .errstr = "R1 offset is outside of the packet",
8668 .result = REJECT,
8669 .prog_type = BPF_PROG_TYPE_XDP,
8670 },
8671 {
8672 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8673 .insns = {
8674 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8675 offsetof(struct xdp_md, data_meta)),
8676 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8677 offsetof(struct xdp_md, data)),
8678 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8680 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8681 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8682 BPF_MOV64_IMM(BPF_REG_0, 0),
8683 BPF_EXIT_INSN(),
8684 },
8685 .errstr = "R1 offset is outside of the packet",
8686 .result = REJECT,
8687 .prog_type = BPF_PROG_TYPE_XDP,
8688 },
8689 {
8690 "XDP pkt read, pkt_meta' < pkt_data, good access",
8691 .insns = {
8692 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8693 offsetof(struct xdp_md, data_meta)),
8694 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8695 offsetof(struct xdp_md, data)),
8696 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8697 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8698 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8699 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8700 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8701 BPF_MOV64_IMM(BPF_REG_0, 0),
8702 BPF_EXIT_INSN(),
8703 },
8704 .result = ACCEPT,
8705 .prog_type = BPF_PROG_TYPE_XDP,
8706 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8707 },
8708 {
8709 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8710 .insns = {
8711 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8712 offsetof(struct xdp_md, data_meta)),
8713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8714 offsetof(struct xdp_md, data)),
8715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8717 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8718 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8719 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8720 BPF_MOV64_IMM(BPF_REG_0, 0),
8721 BPF_EXIT_INSN(),
8722 },
8723 .errstr = "R1 offset is outside of the packet",
8724 .result = REJECT,
8725 .prog_type = BPF_PROG_TYPE_XDP,
8726 },
8727 {
8728 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8729 .insns = {
8730 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8731 offsetof(struct xdp_md, data_meta)),
8732 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8733 offsetof(struct xdp_md, data)),
8734 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8736 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8737 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8738 BPF_MOV64_IMM(BPF_REG_0, 0),
8739 BPF_EXIT_INSN(),
8740 },
8741 .errstr = "R1 offset is outside of the packet",
8742 .result = REJECT,
8743 .prog_type = BPF_PROG_TYPE_XDP,
8744 },
8745 {
8746 "XDP pkt read, pkt_data < pkt_meta', good access",
8747 .insns = {
8748 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8749 offsetof(struct xdp_md, data_meta)),
8750 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8751 offsetof(struct xdp_md, data)),
8752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8754 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8755 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8756 BPF_MOV64_IMM(BPF_REG_0, 0),
8757 BPF_EXIT_INSN(),
8758 },
8759 .result = ACCEPT,
8760 .prog_type = BPF_PROG_TYPE_XDP,
8761 },
8762 {
8763 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8764 .insns = {
8765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8766 offsetof(struct xdp_md, data_meta)),
8767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8768 offsetof(struct xdp_md, data)),
8769 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8771 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8772 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8773 BPF_MOV64_IMM(BPF_REG_0, 0),
8774 BPF_EXIT_INSN(),
8775 },
8776 .errstr = "R1 offset is outside of the packet",
8777 .result = REJECT,
8778 .prog_type = BPF_PROG_TYPE_XDP,
8779 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8780 },
8781 {
8782 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8783 .insns = {
8784 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8785 offsetof(struct xdp_md, data_meta)),
8786 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8787 offsetof(struct xdp_md, data)),
8788 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8790 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8792 BPF_MOV64_IMM(BPF_REG_0, 0),
8793 BPF_EXIT_INSN(),
8794 },
8795 .errstr = "R1 offset is outside of the packet",
8796 .result = REJECT,
8797 .prog_type = BPF_PROG_TYPE_XDP,
8798 },
8799 {
8800 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8801 .insns = {
8802 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8803 offsetof(struct xdp_md, data_meta)),
8804 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8805 offsetof(struct xdp_md, data)),
8806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8808 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8809 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8810 BPF_MOV64_IMM(BPF_REG_0, 0),
8811 BPF_EXIT_INSN(),
8812 },
8813 .result = ACCEPT,
8814 .prog_type = BPF_PROG_TYPE_XDP,
8815 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8816 },
8817 {
8818 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8819 .insns = {
8820 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8821 offsetof(struct xdp_md, data_meta)),
8822 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8823 offsetof(struct xdp_md, data)),
8824 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8826 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8827 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8828 BPF_MOV64_IMM(BPF_REG_0, 0),
8829 BPF_EXIT_INSN(),
8830 },
8831 .errstr = "R1 offset is outside of the packet",
8832 .result = REJECT,
8833 .prog_type = BPF_PROG_TYPE_XDP,
8834 },
8835 {
8836 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8837 .insns = {
8838 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8839 offsetof(struct xdp_md, data_meta)),
8840 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8841 offsetof(struct xdp_md, data)),
8842 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8844 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8845 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8846 BPF_MOV64_IMM(BPF_REG_0, 0),
8847 BPF_EXIT_INSN(),
8848 },
8849 .errstr = "R1 offset is outside of the packet",
8850 .result = REJECT,
8851 .prog_type = BPF_PROG_TYPE_XDP,
8852 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8853 },
8854 {
8855 "XDP pkt read, pkt_data >= pkt_meta', good access",
8856 .insns = {
8857 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8858 offsetof(struct xdp_md, data_meta)),
8859 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8860 offsetof(struct xdp_md, data)),
8861 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8863 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8864 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8865 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8866 BPF_MOV64_IMM(BPF_REG_0, 0),
8867 BPF_EXIT_INSN(),
8868 },
8869 .result = ACCEPT,
8870 .prog_type = BPF_PROG_TYPE_XDP,
8871 },
8872 {
8873 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8874 .insns = {
8875 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8876 offsetof(struct xdp_md, data_meta)),
8877 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8878 offsetof(struct xdp_md, data)),
8879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8881 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8882 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8883 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8884 BPF_MOV64_IMM(BPF_REG_0, 0),
8885 BPF_EXIT_INSN(),
8886 },
8887 .errstr = "R1 offset is outside of the packet",
8888 .result = REJECT,
8889 .prog_type = BPF_PROG_TYPE_XDP,
8890 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8891 },
8892 {
8893 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8894 .insns = {
8895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8896 offsetof(struct xdp_md, data_meta)),
8897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8898 offsetof(struct xdp_md, data)),
8899 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8901 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8902 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8903 BPF_MOV64_IMM(BPF_REG_0, 0),
8904 BPF_EXIT_INSN(),
8905 },
8906 .errstr = "R1 offset is outside of the packet",
8907 .result = REJECT,
8908 .prog_type = BPF_PROG_TYPE_XDP,
8909 },
8910 {
8911 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8912 .insns = {
8913 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8914 offsetof(struct xdp_md, data_meta)),
8915 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8916 offsetof(struct xdp_md, data)),
8917 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8919 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8920 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8921 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8922 BPF_MOV64_IMM(BPF_REG_0, 0),
8923 BPF_EXIT_INSN(),
8924 },
8925 .result = ACCEPT,
8926 .prog_type = BPF_PROG_TYPE_XDP,
8927 },
8928 {
8929 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8930 .insns = {
8931 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8932 offsetof(struct xdp_md, data_meta)),
8933 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8934 offsetof(struct xdp_md, data)),
8935 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8936 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8937 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8938 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8939 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8940 BPF_MOV64_IMM(BPF_REG_0, 0),
8941 BPF_EXIT_INSN(),
8942 },
8943 .errstr = "R1 offset is outside of the packet",
8944 .result = REJECT,
8945 .prog_type = BPF_PROG_TYPE_XDP,
8946 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8947 },
8948 {
8949 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8950 .insns = {
8951 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8952 offsetof(struct xdp_md, data_meta)),
8953 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8954 offsetof(struct xdp_md, data)),
8955 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8957 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8958 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8959 BPF_MOV64_IMM(BPF_REG_0, 0),
8960 BPF_EXIT_INSN(),
8961 },
8962 .errstr = "R1 offset is outside of the packet",
8963 .result = REJECT,
8964 .prog_type = BPF_PROG_TYPE_XDP,
8965 },
8966 {
8967 "XDP pkt read, pkt_data <= pkt_meta', good access",
8968 .insns = {
8969 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8970 offsetof(struct xdp_md, data_meta)),
8971 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8972 offsetof(struct xdp_md, data)),
8973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8975 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8976 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8977 BPF_MOV64_IMM(BPF_REG_0, 0),
8978 BPF_EXIT_INSN(),
8979 },
8980 .result = ACCEPT,
8981 .prog_type = BPF_PROG_TYPE_XDP,
8982 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8983 },
8984 {
8985 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8986 .insns = {
8987 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8988 offsetof(struct xdp_md, data_meta)),
8989 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8990 offsetof(struct xdp_md, data)),
8991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8993 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8994 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8995 BPF_MOV64_IMM(BPF_REG_0, 0),
8996 BPF_EXIT_INSN(),
8997 },
8998 .errstr = "R1 offset is outside of the packet",
8999 .result = REJECT,
9000 .prog_type = BPF_PROG_TYPE_XDP,
9001 },
9002 {
9003 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
9004 .insns = {
9005 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9006 offsetof(struct xdp_md, data_meta)),
9007 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9008 offsetof(struct xdp_md, data)),
9009 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9011 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9012 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9013 BPF_MOV64_IMM(BPF_REG_0, 0),
9014 BPF_EXIT_INSN(),
9015 },
9016 .errstr = "R1 offset is outside of the packet",
9017 .result = REJECT,
9018 .prog_type = BPF_PROG_TYPE_XDP,
9019 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9020 },
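	/* "check deducing bounds from const": a conditional jump against an
	 * immediate lets the verifier narrow the signed bounds of the tested
	 * register.  These tests make sure the deduced bounds are applied
	 * consistently when the register later meets a pointer: subtracting
	 * a pointer from a scalar stays forbidden, and ctx pointer
	 * arithmetic with a possibly unbounded value is rejected.
	 */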
9021 {
9022		"check deducing bounds from const, 1",
9023 .insns = {
9024 BPF_MOV64_IMM(BPF_REG_0, 1),
9025 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
9026 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9027 BPF_EXIT_INSN(),
9028 },
9029 .result = REJECT,
9030 .errstr = "R0 tried to subtract pointer from scalar",
9031 },
9032 {
9033 "check deducing bounds from const, 2",
9034 .insns = {
9035 BPF_MOV64_IMM(BPF_REG_0, 1),
9036 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
9037 BPF_EXIT_INSN(),
9038 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
9039 BPF_EXIT_INSN(),
9040 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9041 BPF_EXIT_INSN(),
9042 },
9043 .result = ACCEPT,
9044		.retval = 1,
9045	},
9046 {
9047 "check deducing bounds from const, 3",
9048 .insns = {
9049 BPF_MOV64_IMM(BPF_REG_0, 0),
9050 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9051 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9052 BPF_EXIT_INSN(),
9053 },
9054 .result = REJECT,
9055 .errstr = "R0 tried to subtract pointer from scalar",
9056 },
9057 {
9058 "check deducing bounds from const, 4",
9059 .insns = {
9060 BPF_MOV64_IMM(BPF_REG_0, 0),
9061 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
9062 BPF_EXIT_INSN(),
9063 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9064 BPF_EXIT_INSN(),
9065 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9066 BPF_EXIT_INSN(),
9067 },
9068 .result = ACCEPT,
9069 },
9070 {
9071 "check deducing bounds from const, 5",
9072 .insns = {
9073 BPF_MOV64_IMM(BPF_REG_0, 0),
9074 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9075 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9076 BPF_EXIT_INSN(),
9077 },
9078 .result = REJECT,
9079 .errstr = "R0 tried to subtract pointer from scalar",
9080 },
9081 {
9082 "check deducing bounds from const, 6",
9083 .insns = {
9084 BPF_MOV64_IMM(BPF_REG_0, 0),
9085 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9086 BPF_EXIT_INSN(),
9087 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9088 BPF_EXIT_INSN(),
9089 },
9090 .result = REJECT,
9091 .errstr = "R0 tried to subtract pointer from scalar",
9092 },
9093 {
9094 "check deducing bounds from const, 7",
9095 .insns = {
9096 BPF_MOV64_IMM(BPF_REG_0, ~0),
9097 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9098 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
9099 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9100 offsetof(struct __sk_buff, mark)),
9101 BPF_EXIT_INSN(),
9102 },
9103 .result = REJECT,
9104 .errstr = "dereference of modified ctx ptr",
9105 },
9106 {
9107 "check deducing bounds from const, 8",
9108 .insns = {
9109 BPF_MOV64_IMM(BPF_REG_0, ~0),
9110 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
9111 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
9112 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9113 offsetof(struct __sk_buff, mark)),
9114 BPF_EXIT_INSN(),
9115 },
9116 .result = REJECT,
9117 .errstr = "dereference of modified ctx ptr",
9118 },
9119 {
9120 "check deducing bounds from const, 9",
9121 .insns = {
9122 BPF_MOV64_IMM(BPF_REG_0, 0),
9123 BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
9124 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9125 BPF_EXIT_INSN(),
9126 },
9127 .result = REJECT,
9128 .errstr = "R0 tried to subtract pointer from scalar",
9129 },
9130 {
9131 "check deducing bounds from const, 10",
9132 .insns = {
9133 BPF_MOV64_IMM(BPF_REG_0, 0),
9134 BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
9135 /* Marks reg as unknown. */
9136 BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
9137 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
9138 BPF_EXIT_INSN(),
9139 },
9140 .result = REJECT,
9141 .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
9142 },
9143 {
Daniel Borkmannb06723d2017-11-01 23:58:09 +01009144 "bpf_exit with invalid return code. test1",
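		/* BPF_PROG_TYPE_CGROUP_SOCK programs may only return 0 or 1.
		 * R0 below is a raw 32-bit load from the ctx, so all the
		 * verifier knows at exit is the range (0x0; 0xffffffff) and
		 * it rejects the program.
		 */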
9145 .insns = {
9146 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9147 BPF_EXIT_INSN(),
9148 },
9149 .errstr = "R0 has value (0x0; 0xffffffff)",
9150 .result = REJECT,
9151 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9152 },
9153 {
9154 "bpf_exit with invalid return code. test2",
9155 .insns = {
9156 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9157 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
9158 BPF_EXIT_INSN(),
9159 },
9160 .result = ACCEPT,
9161 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9162 },
9163 {
9164 "bpf_exit with invalid return code. test3",
9165 .insns = {
9166 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9167 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
9168 BPF_EXIT_INSN(),
9169 },
9170 .errstr = "R0 has value (0x0; 0x3)",
9171 .result = REJECT,
9172 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9173 },
9174 {
9175 "bpf_exit with invalid return code. test4",
9176 .insns = {
9177 BPF_MOV64_IMM(BPF_REG_0, 1),
9178 BPF_EXIT_INSN(),
9179 },
9180 .result = ACCEPT,
9181 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9182 },
9183 {
9184 "bpf_exit with invalid return code. test5",
9185 .insns = {
9186 BPF_MOV64_IMM(BPF_REG_0, 2),
9187 BPF_EXIT_INSN(),
9188 },
9189 .errstr = "R0 has value (0x2; 0x0)",
9190 .result = REJECT,
9191 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9192 },
9193 {
9194 "bpf_exit with invalid return code. test6",
9195 .insns = {
9196 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9197 BPF_EXIT_INSN(),
9198 },
9199 .errstr = "R0 is not a known value (ctx)",
9200 .result = REJECT,
9201 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9202 },
9203 {
9204 "bpf_exit with invalid return code. test7",
9205 .insns = {
9206 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
9208 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
9209 BPF_EXIT_INSN(),
9210 },
9211 .errstr = "R0 has unknown scalar value",
9212 .result = REJECT,
9213 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
9214 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009215 {
9216 "calls: basic sanity",
9217 .insns = {
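			/* bpf-to-bpf call: src_reg 1 (BPF_PSEUDO_CALL) marks a
			 * call to another bpf function and imm is the callee's
			 * insn offset relative to the next instruction, so
			 * imm = 2 targets the r0 = 2 block below.
			 */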
9218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9219 BPF_MOV64_IMM(BPF_REG_0, 1),
9220 BPF_EXIT_INSN(),
9221 BPF_MOV64_IMM(BPF_REG_0, 2),
9222 BPF_EXIT_INSN(),
9223 },
9224 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9225 .result = ACCEPT,
9226 },
9227 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009228		"calls: not on unprivileged",
9229 .insns = {
9230 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9231 BPF_MOV64_IMM(BPF_REG_0, 1),
9232 BPF_EXIT_INSN(),
9233 BPF_MOV64_IMM(BPF_REG_0, 2),
9234 BPF_EXIT_INSN(),
9235 },
9236 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
9237 .result_unpriv = REJECT,
9238 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009239 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009240 },
9241 {
Daniel Borkmann21ccaf22018-01-26 23:33:48 +01009242 "calls: div by 0 in subprog",
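		/* The callee deliberately divides by zero. With BPF_DIV by
		 * zero defined to yield 0 rather than aborting the program,
		 * execution continues and the caller still returns 1, which
		 * is what the retval below checks.
		 */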
9243 .insns = {
9244 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9247 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9248 offsetof(struct __sk_buff, data_end)),
9249 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9250 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9251 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9252 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9253 BPF_MOV64_IMM(BPF_REG_0, 1),
9254 BPF_EXIT_INSN(),
9255 BPF_MOV32_IMM(BPF_REG_2, 0),
9256 BPF_MOV32_IMM(BPF_REG_3, 1),
9257 BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
9258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9259 offsetof(struct __sk_buff, data)),
9260 BPF_EXIT_INSN(),
9261 },
9262 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9263 .result = ACCEPT,
9264 .retval = 1,
9265 },
9266 {
9267 "calls: multiple ret types in subprog 1",
9268 .insns = {
9269 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9271 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9272 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9273 offsetof(struct __sk_buff, data_end)),
9274 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9276 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9277 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9278 BPF_MOV64_IMM(BPF_REG_0, 1),
9279 BPF_EXIT_INSN(),
9280 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9281 offsetof(struct __sk_buff, data)),
9282 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9283 BPF_MOV32_IMM(BPF_REG_0, 42),
9284 BPF_EXIT_INSN(),
9285 },
9286 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9287 .result = REJECT,
9288 .errstr = "R0 invalid mem access 'inv'",
9289 },
9290 {
9291 "calls: multiple ret types in subprog 2",
9292 .insns = {
9293 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9296 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
9297 offsetof(struct __sk_buff, data_end)),
9298 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
9299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
9300 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
9301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9302 BPF_MOV64_IMM(BPF_REG_0, 1),
9303 BPF_EXIT_INSN(),
9304 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9305 offsetof(struct __sk_buff, data)),
9306 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9307 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
9308 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9309 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9311 BPF_LD_MAP_FD(BPF_REG_1, 0),
9312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9313 BPF_FUNC_map_lookup_elem),
9314 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9315 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
9316 offsetof(struct __sk_buff, data)),
9317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
9318 BPF_EXIT_INSN(),
9319 },
9320 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9321 .fixup_map1 = { 16 },
9322 .result = REJECT,
9323 .errstr = "R0 min value is outside of the array range",
9324 },
9325 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009326 "calls: overlapping caller/callee",
9327 .insns = {
9328 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
9329 BPF_MOV64_IMM(BPF_REG_0, 1),
9330 BPF_EXIT_INSN(),
9331 },
9332 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9333 .errstr = "last insn is not an exit or jmp",
9334 .result = REJECT,
9335 },
9336 {
9337 "calls: wrong recursive calls",
9338 .insns = {
9339 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9340 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9342 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9343 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
9344 BPF_MOV64_IMM(BPF_REG_0, 1),
9345 BPF_EXIT_INSN(),
9346 },
9347 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9348 .errstr = "jump out of range",
9349 .result = REJECT,
9350 },
9351 {
9352 "calls: wrong src reg",
9353 .insns = {
9354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
9355 BPF_MOV64_IMM(BPF_REG_0, 1),
9356 BPF_EXIT_INSN(),
9357 },
9358 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9359 .errstr = "BPF_CALL uses reserved fields",
9360 .result = REJECT,
9361 },
9362 {
9363 "calls: wrong off value",
9364 .insns = {
9365 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
9366 BPF_MOV64_IMM(BPF_REG_0, 1),
9367 BPF_EXIT_INSN(),
9368 BPF_MOV64_IMM(BPF_REG_0, 2),
9369 BPF_EXIT_INSN(),
9370 },
9371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9372 .errstr = "BPF_CALL uses reserved fields",
9373 .result = REJECT,
9374 },
9375 {
9376 "calls: jump back loop",
9377 .insns = {
9378 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9379 BPF_MOV64_IMM(BPF_REG_0, 1),
9380 BPF_EXIT_INSN(),
9381 },
9382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9383 .errstr = "back-edge from insn 0 to 0",
9384 .result = REJECT,
9385 },
9386 {
9387 "calls: conditional call",
9388 .insns = {
9389 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9390 offsetof(struct __sk_buff, mark)),
9391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9393 BPF_MOV64_IMM(BPF_REG_0, 1),
9394 BPF_EXIT_INSN(),
9395 BPF_MOV64_IMM(BPF_REG_0, 2),
9396 BPF_EXIT_INSN(),
9397 },
9398 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9399 .errstr = "jump out of range",
9400 .result = REJECT,
9401 },
9402 {
9403 "calls: conditional call 2",
9404 .insns = {
9405 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9406 offsetof(struct __sk_buff, mark)),
9407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9408 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9409 BPF_MOV64_IMM(BPF_REG_0, 1),
9410 BPF_EXIT_INSN(),
9411 BPF_MOV64_IMM(BPF_REG_0, 2),
9412 BPF_EXIT_INSN(),
9413 BPF_MOV64_IMM(BPF_REG_0, 3),
9414 BPF_EXIT_INSN(),
9415 },
9416 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9417 .result = ACCEPT,
9418 },
9419 {
9420 "calls: conditional call 3",
9421 .insns = {
9422 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9423 offsetof(struct __sk_buff, mark)),
9424 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9425 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
9426 BPF_MOV64_IMM(BPF_REG_0, 1),
9427 BPF_EXIT_INSN(),
9428 BPF_MOV64_IMM(BPF_REG_0, 1),
9429 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9430 BPF_MOV64_IMM(BPF_REG_0, 3),
9431 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9432 },
9433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9434 .errstr = "back-edge from insn",
9435 .result = REJECT,
9436 },
9437 {
9438 "calls: conditional call 4",
9439 .insns = {
9440 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9441 offsetof(struct __sk_buff, mark)),
9442 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9444 BPF_MOV64_IMM(BPF_REG_0, 1),
9445 BPF_EXIT_INSN(),
9446 BPF_MOV64_IMM(BPF_REG_0, 1),
9447 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
9448 BPF_MOV64_IMM(BPF_REG_0, 3),
9449 BPF_EXIT_INSN(),
9450 },
9451 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9452 .result = ACCEPT,
9453 },
9454 {
9455 "calls: conditional call 5",
9456 .insns = {
9457 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9458 offsetof(struct __sk_buff, mark)),
9459 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
9460 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9461 BPF_MOV64_IMM(BPF_REG_0, 1),
9462 BPF_EXIT_INSN(),
9463 BPF_MOV64_IMM(BPF_REG_0, 1),
9464 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
9465 BPF_MOV64_IMM(BPF_REG_0, 3),
9466 BPF_EXIT_INSN(),
9467 },
9468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9469 .errstr = "back-edge from insn",
9470 .result = REJECT,
9471 },
9472 {
9473 "calls: conditional call 6",
9474 .insns = {
9475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9476 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
9477 BPF_EXIT_INSN(),
9478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9479 offsetof(struct __sk_buff, mark)),
9480 BPF_EXIT_INSN(),
9481 },
9482 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9483 .errstr = "back-edge from insn",
9484 .result = REJECT,
9485 },
9486 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009487 "calls: using r0 returned by callee",
9488 .insns = {
9489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9490 BPF_EXIT_INSN(),
9491 BPF_MOV64_IMM(BPF_REG_0, 2),
9492 BPF_EXIT_INSN(),
9493 },
9494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9495 .result = ACCEPT,
9496 },
9497 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009498 "calls: using uninit r0 from callee",
9499 .insns = {
9500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9501 BPF_EXIT_INSN(),
9502 BPF_EXIT_INSN(),
9503 },
9504 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9505 .errstr = "!read_ok",
9506 .result = REJECT,
9507 },
9508 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009509 "calls: callee is using r1",
9510 .insns = {
9511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9512 BPF_EXIT_INSN(),
9513 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9514 offsetof(struct __sk_buff, len)),
9515 BPF_EXIT_INSN(),
9516 },
9517 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
9518 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009519 .retval = TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009520 },
9521 {
9522 "calls: callee using args1",
9523 .insns = {
9524 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9525 BPF_EXIT_INSN(),
9526 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9527 BPF_EXIT_INSN(),
9528 },
9529 .errstr_unpriv = "allowed for root only",
9530 .result_unpriv = REJECT,
9531 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009532 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009533 },
9534 {
9535 "calls: callee using wrong args2",
9536 .insns = {
9537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9538 BPF_EXIT_INSN(),
9539 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9540 BPF_EXIT_INSN(),
9541 },
9542 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9543 .errstr = "R2 !read_ok",
9544 .result = REJECT,
9545 },
9546 {
9547 "calls: callee using two args",
9548 .insns = {
9549 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9550 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
9551 offsetof(struct __sk_buff, len)),
9552 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
9553 offsetof(struct __sk_buff, len)),
9554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9555 BPF_EXIT_INSN(),
9556 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
9557 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
9558 BPF_EXIT_INSN(),
9559 },
9560 .errstr_unpriv = "allowed for root only",
9561 .result_unpriv = REJECT,
9562 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009563 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009564 },
9565 {
9566 "calls: callee changing pkt pointers",
9567 .insns = {
9568 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
9569 offsetof(struct xdp_md, data)),
9570 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
9571 offsetof(struct xdp_md, data_end)),
9572 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
9573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
9574 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
9575 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9576 /* clear_all_pkt_pointers() has to walk all frames
9577 * to make sure that pkt pointers in the caller
9578			 * are cleared when the callee calls a helper that
9579			 * adjusts the packet size
9580 */
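			/* R6 still holds the pre-call pkt pointer; since the
			 * callee may have resized the packet, this load must be
			 * flagged as 'R6 invalid mem access'.
			 */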
9581 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9582 BPF_MOV32_IMM(BPF_REG_0, 0),
9583 BPF_EXIT_INSN(),
9584 BPF_MOV64_IMM(BPF_REG_2, 0),
9585 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9586 BPF_FUNC_xdp_adjust_head),
9587 BPF_EXIT_INSN(),
9588 },
9589 .result = REJECT,
9590 .errstr = "R6 invalid mem access 'inv'",
9591 .prog_type = BPF_PROG_TYPE_XDP,
9592 },
9593 {
9594 "calls: two calls with args",
9595 .insns = {
9596 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9597 BPF_EXIT_INSN(),
9598 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9600 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9603 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9604 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9605 BPF_EXIT_INSN(),
9606 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9607 offsetof(struct __sk_buff, len)),
9608 BPF_EXIT_INSN(),
9609 },
9610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9611 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009612 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009613 },
9614 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009615 "calls: calls with stack arith",
9616 .insns = {
9617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9620 BPF_EXIT_INSN(),
9621 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9622 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9623 BPF_EXIT_INSN(),
9624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9625 BPF_MOV64_IMM(BPF_REG_0, 42),
9626 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9627 BPF_EXIT_INSN(),
9628 },
9629 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9630 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009631 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009632 },
9633 {
9634 "calls: calls with misaligned stack access",
9635 .insns = {
9636 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9639 BPF_EXIT_INSN(),
9640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9641 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9642 BPF_EXIT_INSN(),
9643 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9644 BPF_MOV64_IMM(BPF_REG_0, 42),
9645 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9646 BPF_EXIT_INSN(),
9647 },
9648 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9649 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9650 .errstr = "misaligned stack access",
9651 .result = REJECT,
9652 },
9653 {
9654 "calls: calls control flow, jump test",
9655 .insns = {
9656 BPF_MOV64_IMM(BPF_REG_0, 42),
9657 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9658 BPF_MOV64_IMM(BPF_REG_0, 43),
9659 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9660 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9661 BPF_EXIT_INSN(),
9662 },
9663 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9664 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009665 .retval = 43,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009666 },
9667 {
9668 "calls: calls control flow, jump test 2",
9669 .insns = {
9670 BPF_MOV64_IMM(BPF_REG_0, 42),
9671 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9672 BPF_MOV64_IMM(BPF_REG_0, 43),
9673 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9674 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9675 BPF_EXIT_INSN(),
9676 },
9677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9678 .errstr = "jump out of range from insn 1 to 4",
9679 .result = REJECT,
9680 },
9681 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009682 "calls: two calls with bad jump",
9683 .insns = {
9684 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9685 BPF_EXIT_INSN(),
9686 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9687 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9688 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9691 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9692 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9693 BPF_EXIT_INSN(),
9694 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9695 offsetof(struct __sk_buff, len)),
9696 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9697 BPF_EXIT_INSN(),
9698 },
9699 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9700 .errstr = "jump out of range from insn 11 to 9",
9701 .result = REJECT,
9702 },
9703 {
9704 "calls: recursive call. test1",
9705 .insns = {
9706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9707 BPF_EXIT_INSN(),
9708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9709 BPF_EXIT_INSN(),
9710 },
9711 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9712 .errstr = "back-edge",
9713 .result = REJECT,
9714 },
9715 {
9716 "calls: recursive call. test2",
9717 .insns = {
9718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9719 BPF_EXIT_INSN(),
9720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9721 BPF_EXIT_INSN(),
9722 },
9723 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9724 .errstr = "back-edge",
9725 .result = REJECT,
9726 },
9727 {
9728 "calls: unreachable code",
9729 .insns = {
9730 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9731 BPF_EXIT_INSN(),
9732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9733 BPF_EXIT_INSN(),
9734 BPF_MOV64_IMM(BPF_REG_0, 0),
9735 BPF_EXIT_INSN(),
9736 BPF_MOV64_IMM(BPF_REG_0, 0),
9737 BPF_EXIT_INSN(),
9738 },
9739 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9740 .errstr = "unreachable insn 6",
9741 .result = REJECT,
9742 },
9743 {
9744 "calls: invalid call",
9745 .insns = {
9746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9747 BPF_EXIT_INSN(),
9748 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9749 BPF_EXIT_INSN(),
9750 },
9751 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9752 .errstr = "invalid destination",
9753 .result = REJECT,
9754 },
9755 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009756 "calls: invalid call 2",
9757 .insns = {
9758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9759 BPF_EXIT_INSN(),
9760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9761 BPF_EXIT_INSN(),
9762 },
9763 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9764 .errstr = "invalid destination",
9765 .result = REJECT,
9766 },
9767 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009768 "calls: jumping across function bodies. test1",
9769 .insns = {
9770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9771 BPF_MOV64_IMM(BPF_REG_0, 0),
9772 BPF_EXIT_INSN(),
9773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9774 BPF_EXIT_INSN(),
9775 },
9776 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9777 .errstr = "jump out of range",
9778 .result = REJECT,
9779 },
9780 {
9781 "calls: jumping across function bodies. test2",
9782 .insns = {
9783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9784 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9785 BPF_MOV64_IMM(BPF_REG_0, 0),
9786 BPF_EXIT_INSN(),
9787 BPF_EXIT_INSN(),
9788 },
9789 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9790 .errstr = "jump out of range",
9791 .result = REJECT,
9792 },
9793 {
9794 "calls: call without exit",
9795 .insns = {
9796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9797 BPF_EXIT_INSN(),
9798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9799 BPF_EXIT_INSN(),
9800 BPF_MOV64_IMM(BPF_REG_0, 0),
9801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9802 },
9803 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9804 .errstr = "not an exit",
9805 .result = REJECT,
9806 },
9807 {
9808 "calls: call into middle of ld_imm64",
9809 .insns = {
9810 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9812 BPF_MOV64_IMM(BPF_REG_0, 0),
9813 BPF_EXIT_INSN(),
9814 BPF_LD_IMM64(BPF_REG_0, 0),
9815 BPF_EXIT_INSN(),
9816 },
9817 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9818 .errstr = "last insn",
9819 .result = REJECT,
9820 },
9821 {
9822 "calls: call into middle of other call",
9823 .insns = {
9824 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9825 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9826 BPF_MOV64_IMM(BPF_REG_0, 0),
9827 BPF_EXIT_INSN(),
9828 BPF_MOV64_IMM(BPF_REG_0, 0),
9829 BPF_MOV64_IMM(BPF_REG_0, 0),
9830 BPF_EXIT_INSN(),
9831 },
9832 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9833 .errstr = "last insn",
9834 .result = REJECT,
9835 },
9836 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009837 "calls: ld_abs with changing ctx data in callee",
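		/* bpf_skb_vlan_push() can change skb data, and the verifier
		 * refuses to mix such helpers with LD_ABS/LD_IND anywhere in
		 * the program, including across the bpf-to-bpf call boundary
		 * exercised here.
		 */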
9838 .insns = {
9839 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9840 BPF_LD_ABS(BPF_B, 0),
9841 BPF_LD_ABS(BPF_H, 0),
9842 BPF_LD_ABS(BPF_W, 0),
9843 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9844 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9845 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9846 BPF_LD_ABS(BPF_B, 0),
9847 BPF_LD_ABS(BPF_H, 0),
9848 BPF_LD_ABS(BPF_W, 0),
9849 BPF_EXIT_INSN(),
9850 BPF_MOV64_IMM(BPF_REG_2, 1),
9851 BPF_MOV64_IMM(BPF_REG_3, 2),
9852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9853 BPF_FUNC_skb_vlan_push),
9854 BPF_EXIT_INSN(),
9855 },
9856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9857 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9858 .result = REJECT,
9859 },
9860 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009861 "calls: two calls with bad fallthrough",
9862 .insns = {
9863 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9864 BPF_EXIT_INSN(),
9865 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9867 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9868 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9870 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9871 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9872 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9873 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9874 offsetof(struct __sk_buff, len)),
9875 BPF_EXIT_INSN(),
9876 },
9877 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9878 .errstr = "not an exit",
9879 .result = REJECT,
9880 },
9881 {
9882 "calls: two calls with stack read",
9883 .insns = {
9884 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9888 BPF_EXIT_INSN(),
9889 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9890 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9891 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9892 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9893 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9894 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9895 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9896 BPF_EXIT_INSN(),
9897 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9898 BPF_EXIT_INSN(),
9899 },
9900 .prog_type = BPF_PROG_TYPE_XDP,
9901 .result = ACCEPT,
9902 },
9903 {
9904 "calls: two calls with stack write",
9905 .insns = {
9906 /* main prog */
9907 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9909 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9913 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9914 BPF_EXIT_INSN(),
9915
9916 /* subprog 1 */
9917 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9918 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
9920 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
9921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9923 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
9924 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
9925 /* write into stack frame of main prog */
9926 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9927 BPF_EXIT_INSN(),
9928
9929 /* subprog 2 */
9930 /* read from stack frame of main prog */
9931 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9932 BPF_EXIT_INSN(),
9933 },
9934 .prog_type = BPF_PROG_TYPE_XDP,
9935 .result = ACCEPT,
9936 },
9937 {
Jann Horn6b80ad22017-12-22 19:12:35 +01009938 "calls: stack overflow using two frames (pre-call access)",
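		/* Both frames store at fp-300, so the combined stack is about
		 * 300 + 300 = 600 bytes (more after alignment rounding), well
		 * over the 512 byte limit shared by all frames.
		 */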
9939 .insns = {
9940 /* prog 1 */
9941 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9942 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
9943 BPF_EXIT_INSN(),
9944
9945 /* prog 2 */
9946 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9947 BPF_MOV64_IMM(BPF_REG_0, 0),
9948 BPF_EXIT_INSN(),
9949 },
9950 .prog_type = BPF_PROG_TYPE_XDP,
9951 .errstr = "combined stack size",
9952 .result = REJECT,
9953 },
9954 {
9955 "calls: stack overflow using two frames (post-call access)",
9956 .insns = {
9957 /* prog 1 */
9958 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
9959 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9960 BPF_EXIT_INSN(),
9961
9962 /* prog 2 */
9963 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9964 BPF_MOV64_IMM(BPF_REG_0, 0),
9965 BPF_EXIT_INSN(),
9966 },
9967 .prog_type = BPF_PROG_TYPE_XDP,
9968 .errstr = "combined stack size",
9969 .result = REJECT,
9970 },
9971 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -08009972 "calls: stack depth check using three frames. test1",
9973 .insns = {
9974 /* main */
9975 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9976 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9977 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9978 BPF_MOV64_IMM(BPF_REG_0, 0),
9979 BPF_EXIT_INSN(),
9980 /* A */
9981 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9982 BPF_EXIT_INSN(),
9983 /* B */
9984 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9985 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9986 BPF_EXIT_INSN(),
9987 },
9988 .prog_type = BPF_PROG_TYPE_XDP,
9989 /* stack_main=32, stack_A=256, stack_B=64
9990 * and max(main+A, main+A+B) < 512
9991 */
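		/* worked out: main+A = 32+256 = 288 and main+B+A =
		 * 32+64+256 = 352, so every call chain stays below 512
		 */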
9992 .result = ACCEPT,
9993 },
9994 {
9995 "calls: stack depth check using three frames. test2",
9996 .insns = {
9997 /* main */
9998 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9999 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
10000 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
10001 BPF_MOV64_IMM(BPF_REG_0, 0),
10002 BPF_EXIT_INSN(),
10003 /* A */
10004 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10005 BPF_EXIT_INSN(),
10006 /* B */
10007 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
10008 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10009 BPF_EXIT_INSN(),
10010 },
10011 .prog_type = BPF_PROG_TYPE_XDP,
10012 /* stack_main=32, stack_A=64, stack_B=256
10013 * and max(main+A, main+A+B) < 512
10014 */
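		/* worked out: main+A = 32+64 = 96 and main+B+A =
		 * 32+256+64 = 352, again below 512
		 */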
10015 .result = ACCEPT,
10016 },
10017 {
10018 "calls: stack depth check using three frames. test3",
10019 .insns = {
10020 /* main */
10021 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10022 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10023 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10024 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
10025 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
10026 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
10027 BPF_MOV64_IMM(BPF_REG_0, 0),
10028 BPF_EXIT_INSN(),
10029 /* A */
10030 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
10031 BPF_EXIT_INSN(),
10032 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
10033 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10034 /* B */
10035 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
10036 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
10037 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
10038 BPF_EXIT_INSN(),
10039 },
10040 .prog_type = BPF_PROG_TYPE_XDP,
10041 /* stack_main=64, stack_A=224, stack_B=256
10042 * and max(main+A, main+A+B) > 512
10043 */
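		/* worked out: main+B+A = 64+256+224 = 544 exceeds 512,
		 * hence the "combined stack" rejection
		 */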
10044 .errstr = "combined stack",
10045 .result = REJECT,
10046 },
10047 {
10048 "calls: stack depth check using three frames. test4",
10049 /* void main(void) {
10050 * func1(0);
10051 * func1(1);
10052 * func2(1);
10053 * }
10054 * void func1(int alloc_or_recurse) {
10055 * if (alloc_or_recurse) {
10056 * frame_pointer[-300] = 1;
10057 * } else {
10058 * func2(alloc_or_recurse);
10059 * }
10060 * }
10061 * void func2(int alloc_or_recurse) {
10062 * if (alloc_or_recurse) {
10063 * frame_pointer[-300] = 1;
10064 * }
10065 * }
10066 */
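		/* The verifier accounts the worst-case stack usage of each
		 * subprog (300 bytes for both func1 and func2), not which
		 * branch actually allocates, so main->func1->func2 is counted
		 * as roughly 600 bytes and rejected.
		 */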
10067 .insns = {
10068 /* main */
10069 BPF_MOV64_IMM(BPF_REG_1, 0),
10070 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
10071 BPF_MOV64_IMM(BPF_REG_1, 1),
10072 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
10073 BPF_MOV64_IMM(BPF_REG_1, 1),
10074 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
10075 BPF_MOV64_IMM(BPF_REG_0, 0),
10076 BPF_EXIT_INSN(),
10077 /* A */
10078 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
10079 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10080 BPF_EXIT_INSN(),
10081 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10082 BPF_EXIT_INSN(),
10083 /* B */
10084 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10085 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10086 BPF_EXIT_INSN(),
10087 },
10088 .prog_type = BPF_PROG_TYPE_XDP,
10089 .result = REJECT,
10090 .errstr = "combined stack",
10091 },
10092 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -080010093 "calls: stack depth check using three frames. test5",
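		/* main plus subprogs A through H forms a call chain nine
		 * frames deep; assuming the usual limit of eight nested
		 * frames, this must fail with the "call stack" error.
		 */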
10094 .insns = {
10095 /* main */
10096 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
10097 BPF_EXIT_INSN(),
10098 /* A */
10099 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
10100 BPF_EXIT_INSN(),
10101 /* B */
10102 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
10103 BPF_EXIT_INSN(),
10104 /* C */
10105 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
10106 BPF_EXIT_INSN(),
10107 /* D */
10108 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
10109 BPF_EXIT_INSN(),
10110 /* E */
10111 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
10112 BPF_EXIT_INSN(),
10113 /* F */
10114 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
10115 BPF_EXIT_INSN(),
10116 /* G */
10117 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
10118 BPF_EXIT_INSN(),
10119 /* H */
10120 BPF_MOV64_IMM(BPF_REG_0, 0),
10121 BPF_EXIT_INSN(),
10122 },
10123 .prog_type = BPF_PROG_TYPE_XDP,
10124 .errstr = "call stack",
10125 .result = REJECT,
10126 },
10127 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010128 "calls: spill into caller stack frame",
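		/* The callee gets a pointer into the caller's stack in R1 and
		 * tries to spill that stack pointer into the caller's frame;
		 * spilling stack pointers across frames is not allowed, hence
		 * "cannot spill".
		 */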
10129 .insns = {
10130 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10131 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10134 BPF_EXIT_INSN(),
10135 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
10136 BPF_MOV64_IMM(BPF_REG_0, 0),
10137 BPF_EXIT_INSN(),
10138 },
10139 .prog_type = BPF_PROG_TYPE_XDP,
10140 .errstr = "cannot spill",
10141 .result = REJECT,
10142 },
10143 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010144 "calls: write into caller stack frame",
10145 .insns = {
10146 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10148 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10151 BPF_EXIT_INSN(),
10152 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
10153 BPF_MOV64_IMM(BPF_REG_0, 0),
10154 BPF_EXIT_INSN(),
10155 },
10156 .prog_type = BPF_PROG_TYPE_XDP,
10157 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010158 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010159 },
10160 {
10161 "calls: write into callee stack frame",
10162 .insns = {
10163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10164 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
10165 BPF_EXIT_INSN(),
10166 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
10167 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
10168 BPF_EXIT_INSN(),
10169 },
10170 .prog_type = BPF_PROG_TYPE_XDP,
10171 .errstr = "cannot return stack pointer",
10172 .result = REJECT,
10173 },
10174 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010175 "calls: two calls with stack write and void return",
10176 .insns = {
10177 /* main prog */
10178 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10181 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10183 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10184 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10185 BPF_EXIT_INSN(),
10186
10187 /* subprog 1 */
10188 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10189 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10191 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10193 BPF_EXIT_INSN(),
10194
10195 /* subprog 2 */
10196 /* write into stack frame of main prog */
10197 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
10198 BPF_EXIT_INSN(), /* void return */
10199 },
10200 .prog_type = BPF_PROG_TYPE_XDP,
10201 .result = ACCEPT,
10202 },
10203 {
10204 "calls: ambiguous return value",
10205 .insns = {
10206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10208 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10210 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10211 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
10212 BPF_EXIT_INSN(),
10213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
10214 BPF_MOV64_IMM(BPF_REG_0, 0),
10215 BPF_EXIT_INSN(),
10216 },
10217 .errstr_unpriv = "allowed for root only",
10218 .result_unpriv = REJECT,
10219 .errstr = "R0 !read_ok",
10220 .result = REJECT,
10221 },
10222 {
10223 "calls: two calls that return map_value",
10224 .insns = {
10225 /* main prog */
10226 /* pass fp-16, fp-8 into a function */
10227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10229 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10232
10233 /* fetch map_value_ptr from the stack of this function */
10234 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10236 /* write into map value */
10237 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10238			/* fetch second map_value_ptr from the stack */
10239 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10240 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10241 /* write into map value */
10242 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10243 BPF_MOV64_IMM(BPF_REG_0, 0),
10244 BPF_EXIT_INSN(),
10245
10246 /* subprog 1 */
10247 /* call 3rd function twice */
10248 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10249 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10250 /* first time with fp-8 */
10251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10253 /* second time with fp-16 */
10254 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10255 BPF_EXIT_INSN(),
10256
10257 /* subprog 2 */
10258 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10259 /* lookup from map */
10260 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10261 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10262 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10263 BPF_LD_MAP_FD(BPF_REG_1, 0),
10264 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10265 BPF_FUNC_map_lookup_elem),
10266 /* write map_value_ptr into stack frame of main prog */
10267 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10268 BPF_MOV64_IMM(BPF_REG_0, 0),
10269 BPF_EXIT_INSN(), /* return 0 */
10270 },
10271 .prog_type = BPF_PROG_TYPE_XDP,
10272 .fixup_map1 = { 23 },
10273 .result = ACCEPT,
10274 },
10275 {
10276 "calls: two calls that return map_value with bool condition",
10277 .insns = {
10278 /* main prog */
10279 /* pass fp-16, fp-8 into a function */
10280 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10282 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10285 BPF_MOV64_IMM(BPF_REG_0, 0),
10286 BPF_EXIT_INSN(),
10287
10288 /* subprog 1 */
10289 /* call 3rd function twice */
10290 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10291 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10292 /* first time with fp-8 */
10293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10294 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10295 /* fetch map_value_ptr from the stack of this function */
10296 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10297 /* write into map value */
10298 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10299 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10300 /* second time with fp-16 */
10301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10302 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10303			/* fetch second map_value_ptr from the stack */
10304 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10305 /* write into map value */
10306 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10307 BPF_EXIT_INSN(),
10308
10309 /* subprog 2 */
10310 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10311 /* lookup from map */
10312 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10315 BPF_LD_MAP_FD(BPF_REG_1, 0),
10316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10317 BPF_FUNC_map_lookup_elem),
10318 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10319 BPF_MOV64_IMM(BPF_REG_0, 0),
10320 BPF_EXIT_INSN(), /* return 0 */
10321 /* write map_value_ptr into stack frame of main prog */
10322 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10323 BPF_MOV64_IMM(BPF_REG_0, 1),
10324 BPF_EXIT_INSN(), /* return 1 */
10325 },
10326 .prog_type = BPF_PROG_TYPE_XDP,
10327 .fixup_map1 = { 23 },
10328 .result = ACCEPT,
10329 },
10330 {
10331 "calls: two calls that return map_value with incorrect bool check",
10332 .insns = {
10333 /* main prog */
10334 /* pass fp-16, fp-8 into a function */
10335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10340 BPF_MOV64_IMM(BPF_REG_0, 0),
10341 BPF_EXIT_INSN(),
10342
10343 /* subprog 1 */
10344 /* call 3rd function twice */
10345 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10346 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10347 /* first time with fp-8 */
10348 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
10349 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
10350 /* fetch map_value_ptr from the stack of this function */
10351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10352 /* write into map value */
10353 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
10355 /* second time with fp-16 */
10356 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10357 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
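			/* buggy check: the callee only stores the pointer at
			 * fp-16 when it returns 1, yet this branch falls
			 * through on r0 == 0, so the load below reads a stack
			 * slot that was never written on that path.
			 */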
10358			/* fetch second map_value_ptr from the stack */
10359 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
10360 /* write into map value */
10361 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10362 BPF_EXIT_INSN(),
10363
10364 /* subprog 2 */
10365 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10366 /* lookup from map */
10367 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10368 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10370 BPF_LD_MAP_FD(BPF_REG_1, 0),
10371 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10372 BPF_FUNC_map_lookup_elem),
10373 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10374 BPF_MOV64_IMM(BPF_REG_0, 0),
10375 BPF_EXIT_INSN(), /* return 0 */
10376 /* write map_value_ptr into stack frame of main prog */
10377 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10378 BPF_MOV64_IMM(BPF_REG_0, 1),
10379 BPF_EXIT_INSN(), /* return 1 */
10380 },
10381 .prog_type = BPF_PROG_TYPE_XDP,
10382 .fixup_map1 = { 23 },
10383 .result = REJECT,
10384 .errstr = "invalid read from stack off -16+0 size 8",
10385 },
10386 {
10387 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
10388 .insns = {
10389 /* main prog */
10390 /* pass fp-16, fp-8 into a function */
10391 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10396 BPF_MOV64_IMM(BPF_REG_0, 0),
10397 BPF_EXIT_INSN(),
10398
10399 /* subprog 1 */
10400 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10401 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10402 /* 1st lookup from map */
10403 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10404 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10406 BPF_LD_MAP_FD(BPF_REG_1, 0),
10407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10408 BPF_FUNC_map_lookup_elem),
10409 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10410 BPF_MOV64_IMM(BPF_REG_8, 0),
10411 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10412 /* write map_value_ptr into stack frame of main prog at fp-8 */
10413 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10414 BPF_MOV64_IMM(BPF_REG_8, 1),
10415
10416 /* 2nd lookup from map */
10417 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10419 BPF_LD_MAP_FD(BPF_REG_1, 0),
10420 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10421 BPF_FUNC_map_lookup_elem),
10422 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10423 BPF_MOV64_IMM(BPF_REG_9, 0),
10424 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10425 /* write map_value_ptr into stack frame of main prog at fp-16 */
10426 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10427 BPF_MOV64_IMM(BPF_REG_9, 1),
10428
10429 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10430 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10431 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10432 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10433 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10435 BPF_EXIT_INSN(),
10436
10437 /* subprog 2 */
10438 /* if arg2 == 1 do *arg1 = 0 */
10439 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10440 /* fetch map_value_ptr from the stack of this function */
10441 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10442 /* write into map value */
10443 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10444
10445 /* if arg4 == 1 do *arg3 = 0 */
10446 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10447 /* fetch map_value_ptr from the stack of this function */
10448 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10449 /* write into map value */
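			/* the 8-byte store at offset 2 runs past the 8-byte
			 * map value, which is exactly what the errstr flags
			 */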
10450 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10451 BPF_EXIT_INSN(),
10452 },
10453 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10454 .fixup_map1 = { 12, 22 },
10455 .result = REJECT,
10456 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10457 },
10458 {
10459 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
10460 .insns = {
10461 /* main prog */
10462 /* pass fp-16, fp-8 into a function */
10463 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10465 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10467 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10468 BPF_MOV64_IMM(BPF_REG_0, 0),
10469 BPF_EXIT_INSN(),
10470
10471 /* subprog 1 */
10472 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10473 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10474 /* 1st lookup from map */
10475 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10476 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10478 BPF_LD_MAP_FD(BPF_REG_1, 0),
10479 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10480 BPF_FUNC_map_lookup_elem),
10481 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10482 BPF_MOV64_IMM(BPF_REG_8, 0),
10483 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10484 /* write map_value_ptr into stack frame of main prog at fp-8 */
10485 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10486 BPF_MOV64_IMM(BPF_REG_8, 1),
10487
10488 /* 2nd lookup from map */
10489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
10490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10491 BPF_LD_MAP_FD(BPF_REG_1, 0),
10492 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
10493 BPF_FUNC_map_lookup_elem),
10494 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10495 BPF_MOV64_IMM(BPF_REG_9, 0),
10496 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10497 /* write map_value_ptr into stack frame of main prog at fp-16 */
10498 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10499 BPF_MOV64_IMM(BPF_REG_9, 1),
10500
10501 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10502 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10503 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10504 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10505 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10506 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
10507 BPF_EXIT_INSN(),
10508
10509 /* subprog 2 */
10510 /* if arg2 == 1 do *arg1 = 0 */
10511 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10512 /* fetch map_value_ptr from the stack of this function */
10513 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10514 /* write into map value */
10515 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10516
10517 /* if arg4 == 1 do *arg3 = 0 */
10518 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10519 /* fetch map_value_ptr from the stack of this function */
10520 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10521 /* write into map value */
10522 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10523 BPF_EXIT_INSN(),
10524 },
10525 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10526 .fixup_map1 = { 12, 22 },
10527 .result = ACCEPT,
10528 },
10529 {
10530 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
10531 .insns = {
10532 /* main prog */
10533 /* pass fp-16, fp-8 into a function */
10534 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10536 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10538 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
10539 BPF_MOV64_IMM(BPF_REG_0, 0),
10540 BPF_EXIT_INSN(),
10541
10542 /* subprog 1 */
10543 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10544 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10545 /* 1st lookup from map */
10546 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
10547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10549 BPF_LD_MAP_FD(BPF_REG_1, 0),
10550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10551 BPF_FUNC_map_lookup_elem),
10552 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10553 BPF_MOV64_IMM(BPF_REG_8, 0),
10554 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10555 /* write map_value_ptr into stack frame of main prog at fp-8 */
10556 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10557 BPF_MOV64_IMM(BPF_REG_8, 1),
10558
10559 /* 2nd lookup from map */
10560 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
10562 BPF_LD_MAP_FD(BPF_REG_1, 0),
10563 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10564 BPF_FUNC_map_lookup_elem),
10565 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10566			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
10567 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10568 /* write map_value_ptr into stack frame of main prog at fp-16 */
10569 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10570 BPF_MOV64_IMM(BPF_REG_9, 1),
10571
10572 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10573			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
10574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10575 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10576 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10577			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
10578 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
10579
10580 /* subprog 2 */
10581 /* if arg2 == 1 do *arg1 = 0 */
10582 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10583 /* fetch map_value_ptr from the stack of this function */
10584 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10585 /* write into map value */
10586 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10587
10588 /* if arg4 == 1 do *arg3 = 0 */
10589 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10590 /* fetch map_value_ptr from the stack of this function */
10591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10592 /* write into map value */
10593 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10594 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10595 },
10596 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10597 .fixup_map1 = { 12, 22 },
10598 .result = REJECT,
10599 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10600 },
10601 {
10602 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10603 .insns = {
10604 /* main prog */
10605 /* pass fp-16, fp-8 into a function */
10606 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10607 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10608 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10610 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10611 BPF_MOV64_IMM(BPF_REG_0, 0),
10612 BPF_EXIT_INSN(),
10613
10614 /* subprog 1 */
10615 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10616 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10617 /* 1st lookup from map */
10618 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10619 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10620 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10621 BPF_LD_MAP_FD(BPF_REG_1, 0),
10622 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10623 BPF_FUNC_map_lookup_elem),
10624 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10625 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10626 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10627 BPF_MOV64_IMM(BPF_REG_8, 0),
10628 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10629 BPF_MOV64_IMM(BPF_REG_8, 1),
10630
10631 /* 2nd lookup from map */
10632 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10634 BPF_LD_MAP_FD(BPF_REG_1, 0),
10635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10636 BPF_FUNC_map_lookup_elem),
10637 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10638 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10639 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10640 BPF_MOV64_IMM(BPF_REG_9, 0),
10641 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10642 BPF_MOV64_IMM(BPF_REG_9, 1),
10643
10644 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10648 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10650 BPF_EXIT_INSN(),
10651
10652 /* subprog 2 */
10653 /* if arg2 == 1 do *arg1 = 0 */
10654 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10655 /* fetch map_value_ptr from the stack of this function */
10656 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10657 /* write into map value */
10658 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10659
10660 /* if arg4 == 1 do *arg3 = 0 */
10661 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10662 /* fetch map_value_ptr from the stack of this function */
10663 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10664 /* write into map value */
10665 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10666 BPF_EXIT_INSN(),
10667 },
10668 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10669 .fixup_map1 = { 12, 22 },
10670 .result = ACCEPT,
10671 },
10672 {
10673 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10674 .insns = {
10675 /* main prog */
10676 /* pass fp-16, fp-8 into a function */
10677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10682 BPF_MOV64_IMM(BPF_REG_0, 0),
10683 BPF_EXIT_INSN(),
10684
10685 /* subprog 1 */
10686 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10687 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10688 /* 1st lookup from map */
10689 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10692 BPF_LD_MAP_FD(BPF_REG_1, 0),
10693 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10694 BPF_FUNC_map_lookup_elem),
10695 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10696 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10697 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10698 BPF_MOV64_IMM(BPF_REG_8, 0),
10699 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10700 BPF_MOV64_IMM(BPF_REG_8, 1),
10701
10702 /* 2nd lookup from map */
10703 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10705 BPF_LD_MAP_FD(BPF_REG_1, 0),
10706 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10707 BPF_FUNC_map_lookup_elem),
10708 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10709 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10710 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10711 BPF_MOV64_IMM(BPF_REG_9, 0),
10712 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10713 BPF_MOV64_IMM(BPF_REG_9, 1),
10714
10715 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10718 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10719 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10720 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10721 BPF_EXIT_INSN(),
10722
10723 /* subprog 2 */
10724 /* if arg2 == 1 do *arg1 = 0 */
10725 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10726 /* fetch map_value_ptr from the stack of this function */
10727 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10728 /* write into map value */
10729 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10730
10731 /* if arg4 == 0 do *arg3 = 0 */
10732 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
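			/* inverted flag check: arg4 == 0 is the path where the
			 * second lookup returned NULL, so the spilled value is
			 * a plain scalar and dereferencing it must be rejected.
			 */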
10733 /* fetch map_value_ptr from the stack of this function */
10734 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10735 /* write into map value */
10736 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10737 BPF_EXIT_INSN(),
10738 },
10739 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10740 .fixup_map1 = { 12, 22 },
10741 .result = REJECT,
10742 .errstr = "R0 invalid mem access 'inv'",
10743 },
10744 {
10745 "calls: pkt_ptr spill into caller stack",
10746 .insns = {
10747 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10749 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10750 BPF_EXIT_INSN(),
10751
10752 /* subprog 1 */
10753 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10754 offsetof(struct __sk_buff, data)),
10755 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10756 offsetof(struct __sk_buff, data_end)),
10757 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10759 /* spill unchecked pkt_ptr into stack of caller */
10760 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10761 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10762 /* now the pkt range is verified, read pkt_ptr from stack */
10763 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10764 /* write 4 bytes into packet */
10765 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10766 BPF_EXIT_INSN(),
10767 },
10768 .result = ACCEPT,
10769 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010770 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010771 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080010772 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010773 "calls: pkt_ptr spill into caller stack 2",
10774 .insns = {
10775 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10777 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10778 /* Marking is still kept, but not in all cases safe. */
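			/* The callee spills the pkt_ptr before its range check;
			 * on the path where that check fails the spilled
			 * pointer is unverified, yet it is read back and
			 * dereferenced here unconditionally, so the write must
			 * be rejected.
			 */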
10779 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10780 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10781 BPF_EXIT_INSN(),
10782
10783 /* subprog 1 */
10784 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10785 offsetof(struct __sk_buff, data)),
10786 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10787 offsetof(struct __sk_buff, data_end)),
10788 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10789 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10790 /* spill unchecked pkt_ptr into stack of caller */
10791 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10792 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10793 /* now the pkt range is verified, read pkt_ptr from stack */
10794 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10795 /* write 4 bytes into packet */
10796 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10797 BPF_EXIT_INSN(),
10798 },
10799 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10800 .errstr = "invalid access to packet",
10801 .result = REJECT,
10802 },
10803 {
10804 "calls: pkt_ptr spill into caller stack 3",
10805 .insns = {
10806 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10808 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10810 /* Marking is still kept and safe here. */
10811 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10812 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10813 BPF_EXIT_INSN(),
10814
10815 /* subprog 1 */
10816 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10817 offsetof(struct __sk_buff, data)),
10818 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10819 offsetof(struct __sk_buff, data_end)),
10820 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10822 /* spill unchecked pkt_ptr into stack of caller */
10823 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10824 BPF_MOV64_IMM(BPF_REG_5, 0),
10825 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10826 BPF_MOV64_IMM(BPF_REG_5, 1),
10827 /* now the pkt range is verified, read pkt_ptr from stack */
10828 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10829 /* write 4 bytes into packet */
10830 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10831 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10832 BPF_EXIT_INSN(),
10833 },
10834 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10835 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010836 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010837 },
10838 {
10839 "calls: pkt_ptr spill into caller stack 4",
10840 .insns = {
10841 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10843 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10845 /* Check marking propagated. */
10846 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10847 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10848 BPF_EXIT_INSN(),
10849
10850 /* subprog 1 */
10851 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10852 offsetof(struct __sk_buff, data)),
10853 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10854 offsetof(struct __sk_buff, data_end)),
10855 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10856 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10857 /* spill unchecked pkt_ptr into stack of caller */
10858 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10859 BPF_MOV64_IMM(BPF_REG_5, 0),
10860 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10861 BPF_MOV64_IMM(BPF_REG_5, 1),
10862 /* don't read back pkt_ptr from stack here */
10863 /* write 4 bytes into packet */
10864 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10865 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10866 BPF_EXIT_INSN(),
10867 },
10868 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10869 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010870 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010871 },
10872 {
10873 "calls: pkt_ptr spill into caller stack 5",
10874 .insns = {
10875 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10877 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
10878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10879 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10880 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10881 BPF_EXIT_INSN(),
10882
10883 /* subprog 1 */
10884 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10885 offsetof(struct __sk_buff, data)),
10886 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10887 offsetof(struct __sk_buff, data_end)),
10888 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10890 BPF_MOV64_IMM(BPF_REG_5, 0),
10891 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10892 /* spill checked pkt_ptr into stack of caller */
10893 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10894 BPF_MOV64_IMM(BPF_REG_5, 1),
10895 /* don't read back pkt_ptr from stack here */
10896 /* write 4 bytes into packet */
10897 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10898 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10899 BPF_EXIT_INSN(),
10900 },
10901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10902 .errstr = "same insn cannot be used with different",
10903 .result = REJECT,
10904 },
10905 {
10906 "calls: pkt_ptr spill into caller stack 6",
10907 .insns = {
10908 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10909 offsetof(struct __sk_buff, data_end)),
10910 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10912 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10914 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10915 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10916 BPF_EXIT_INSN(),
10917
10918 /* subprog 1 */
10919 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10920 offsetof(struct __sk_buff, data)),
10921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10922 offsetof(struct __sk_buff, data_end)),
10923 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10925 BPF_MOV64_IMM(BPF_REG_5, 0),
10926 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10927 /* spill checked pkt_ptr into stack of caller */
10928 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10929 BPF_MOV64_IMM(BPF_REG_5, 1),
10930 /* don't read back pkt_ptr from stack here */
10931 /* write 4 bytes into packet */
10932 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10933 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10934 BPF_EXIT_INSN(),
10935 },
10936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10937 .errstr = "R4 invalid mem access",
10938 .result = REJECT,
10939 },
10940 {
10941 "calls: pkt_ptr spill into caller stack 7",
10942 .insns = {
10943 BPF_MOV64_IMM(BPF_REG_2, 0),
10944 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10946 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10948 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10949 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10950 BPF_EXIT_INSN(),
10951
10952 /* subprog 1 */
10953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10954 offsetof(struct __sk_buff, data)),
10955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10956 offsetof(struct __sk_buff, data_end)),
10957 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10959 BPF_MOV64_IMM(BPF_REG_5, 0),
10960 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10961 /* spill checked pkt_ptr into stack of caller */
10962 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10963 BPF_MOV64_IMM(BPF_REG_5, 1),
10964 /* don't read back pkt_ptr from stack here */
10965 /* write 4 bytes into packet */
10966 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10967 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10968 BPF_EXIT_INSN(),
10969 },
10970 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10971 .errstr = "R4 invalid mem access",
10972 .result = REJECT,
10973 },
10974 {
10975 "calls: pkt_ptr spill into caller stack 8",
10976 .insns = {
10977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10978 offsetof(struct __sk_buff, data)),
10979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10980 offsetof(struct __sk_buff, data_end)),
10981 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10983 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10984 BPF_EXIT_INSN(),
10985 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10987 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10989 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10990 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10991 BPF_EXIT_INSN(),
10992
10993 /* subprog 1 */
10994 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10995 offsetof(struct __sk_buff, data)),
10996 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10997 offsetof(struct __sk_buff, data_end)),
10998 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11000 BPF_MOV64_IMM(BPF_REG_5, 0),
11001 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11002 /* spill checked pkt_ptr into stack of caller */
11003 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11004 BPF_MOV64_IMM(BPF_REG_5, 1),
11005 /* don't read back pkt_ptr from stack here */
11006 /* write 4 bytes into packet */
11007 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11008 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11009 BPF_EXIT_INSN(),
11010 },
11011 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11012 .result = ACCEPT,
11013 },
11014 {
11015 "calls: pkt_ptr spill into caller stack 9",
11016 .insns = {
11017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11018 offsetof(struct __sk_buff, data)),
11019 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11020 offsetof(struct __sk_buff, data_end)),
11021 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11023 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
11024 BPF_EXIT_INSN(),
11025 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11027 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11028 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11029 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11030 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11031 BPF_EXIT_INSN(),
11032
11033 /* subprog 1 */
11034 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11035 offsetof(struct __sk_buff, data)),
11036 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11037 offsetof(struct __sk_buff, data_end)),
11038 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11040 BPF_MOV64_IMM(BPF_REG_5, 0),
11041 /* spill unchecked pkt_ptr into stack of caller */
11042 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11043 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11044 BPF_MOV64_IMM(BPF_REG_5, 1),
11045 /* don't read back pkt_ptr from stack here */
11046 /* write 4 bytes into packet */
11047 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11048 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11049 BPF_EXIT_INSN(),
11050 },
11051 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
11052 .errstr = "invalid access to packet",
11053 .result = REJECT,
11054 },
11055 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080011056 "calls: caller stack init to zero or map_value_or_null",
11057 .insns = {
11058 BPF_MOV64_IMM(BPF_REG_0, 0),
11059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
11060 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11062 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11063 /* fetch map_value_or_null or const_zero from stack */
11064 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11065 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11066 /* store into map_value */
11067 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
11068 BPF_EXIT_INSN(),
11069
11070 /* subprog 1 */
11071 /* if (ctx == 0) return; */
11072 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
11073 /* else bpf_map_lookup() and *(fp - 8) = r0 */
11074 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
11075 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11077 BPF_LD_MAP_FD(BPF_REG_1, 0),
11078 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11079 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11080 BPF_FUNC_map_lookup_elem),
11081 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11082 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11083 BPF_EXIT_INSN(),
11084 },
11085 .fixup_map1 = { 13 },
11086 .result = ACCEPT,
11087 .prog_type = BPF_PROG_TYPE_XDP,
11088 },
11089 {
11090 "calls: stack init to zero and pruning",
11091 .insns = {
11092 /* first make allocated_stack 16 byte */
11093 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
11094 /* now fork the execution such that the false branch
11095			 * of JGT insn will be verified second and it skips zero
11096 * init of fp-8 stack slot. If stack liveness marking
11097 * is missing live_read marks from call map_lookup
11098 * processing then pruning will incorrectly assume
11099 * that fp-8 stack slot was unused in the fall-through
11100 * branch and will accept the program incorrectly
11101 */
11102 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
11103 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11104 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
11105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11107 BPF_LD_MAP_FD(BPF_REG_1, 0),
11108 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11109 BPF_FUNC_map_lookup_elem),
11110 BPF_EXIT_INSN(),
11111 },
11112 .fixup_map2 = { 6 },
11113 .errstr = "invalid indirect read from stack off -8+0 size 8",
11114 .result = REJECT,
11115 .prog_type = BPF_PROG_TYPE_XDP,
11116 },
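	/* The two "search pruning" tests below make sure state pruning does
	 * not skip a branch: one path leaves a register or stack slot in a
	 * state that must be rejected, and the tests fail if the verifier
	 * prunes that path instead of verifying it.
	 */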
Gianluca Borellofd05e572017-12-23 10:09:55 +000011117 {
11118 "search pruning: all branches should be verified (nop operation)",
11119 .insns = {
11120 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11122 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11123 BPF_LD_MAP_FD(BPF_REG_1, 0),
11124 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11125 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
11126 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11128 BPF_MOV64_IMM(BPF_REG_4, 0),
11129 BPF_JMP_A(1),
11130 BPF_MOV64_IMM(BPF_REG_4, 1),
11131 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11132 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11133 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
11135 BPF_MOV64_IMM(BPF_REG_6, 0),
11136 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
11137 BPF_EXIT_INSN(),
11138 },
11139 .fixup_map1 = { 3 },
11140 .errstr = "R6 invalid mem access 'inv'",
11141 .result = REJECT,
11142 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11143 },
11144 {
11145 "search pruning: all branches should be verified (invalid stack access)",
11146 .insns = {
11147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11149 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
11150 BPF_LD_MAP_FD(BPF_REG_1, 0),
11151 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
11152 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
11153 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
11154 BPF_MOV64_IMM(BPF_REG_4, 0),
11155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
11156 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
11157 BPF_JMP_A(1),
11158 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
11159 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
11160 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
11161 BPF_EXIT_INSN(),
11162 },
11163 .fixup_map1 = { 3 },
11164 .errstr = "invalid read from stack off -16+0 size 8",
11165 .result = REJECT,
11166 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
11167 },
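	/* The "jit:" tests below are accepted by the verifier and then run
	 * via bpf_prog_test_run(); they return 2 on success so that shift,
	 * ldimm64/mov32 and multiply code generation can be checked at
	 * runtime as well.
	 */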
Daniel Borkmann23d191a2018-02-24 01:08:03 +010011168 {
11169 "jit: lsh, rsh, arsh by 1",
11170 .insns = {
11171 BPF_MOV64_IMM(BPF_REG_0, 1),
11172 BPF_MOV64_IMM(BPF_REG_1, 0xff),
11173 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
11174 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
11175 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
11176 BPF_EXIT_INSN(),
11177 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
11178 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
11179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
11180 BPF_EXIT_INSN(),
11181 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
11182 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
11183 BPF_EXIT_INSN(),
11184 BPF_MOV64_IMM(BPF_REG_0, 2),
11185 BPF_EXIT_INSN(),
11186 },
11187 .result = ACCEPT,
11188 .retval = 2,
11189 },
11190 {
11191 "jit: mov32 for ldimm64, 1",
11192 .insns = {
11193 BPF_MOV64_IMM(BPF_REG_0, 2),
11194 BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
11195 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
11196 BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
11197 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11198 BPF_MOV64_IMM(BPF_REG_0, 1),
11199 BPF_EXIT_INSN(),
11200 },
11201 .result = ACCEPT,
11202 .retval = 2,
11203 },
11204 {
11205 "jit: mov32 for ldimm64, 2",
11206 .insns = {
11207 BPF_MOV64_IMM(BPF_REG_0, 1),
11208 BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
11209 BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
11210 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
11211 BPF_MOV64_IMM(BPF_REG_0, 2),
11212 BPF_EXIT_INSN(),
11213 },
11214 .result = ACCEPT,
11215 .retval = 2,
11216 },
11217 {
11218 "jit: various mul tests",
11219 .insns = {
11220 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11221 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11222 BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
11223 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11224 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11225 BPF_MOV64_IMM(BPF_REG_0, 1),
11226 BPF_EXIT_INSN(),
11227 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11228 BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11229 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11230 BPF_MOV64_IMM(BPF_REG_0, 1),
11231 BPF_EXIT_INSN(),
11232 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
11233 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
11234 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
11235 BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
11236 BPF_MOV64_IMM(BPF_REG_0, 1),
11237 BPF_EXIT_INSN(),
11238 BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
11239 BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
11240 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
11241 BPF_MOV64_IMM(BPF_REG_0, 1),
11242 BPF_EXIT_INSN(),
11243 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
11244 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
11245 BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
11246 BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
11247 BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
11248 BPF_MOV64_IMM(BPF_REG_0, 1),
11249 BPF_EXIT_INSN(),
11250 BPF_MOV64_IMM(BPF_REG_0, 2),
11251 BPF_EXIT_INSN(),
11252 },
11253 .result = ACCEPT,
11254 .retval = 2,
11255 },
11256
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011257};
11258
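/* Find the length of a test program: scan backwards from MAX_INSNS for the
 * last instruction whose code or imm field is non-zero.
 */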
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011259static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011260{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011261 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011262
11263 for (len = MAX_INSNS - 1; len > 0; --len)
11264 if (fp[len].code != 0 || fp[len].imm != 0)
11265 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011266 return len + 1;
11267}
11268
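/* Create a BPF_F_NO_PREALLOC hash map keyed by a long long with the given
 * value size and maximum number of elements; returns the map fd or a
 * negative value on error.
 */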
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011269static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011270{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011271 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011272
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011273 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011274 size_value, max_elem, BPF_F_NO_PREALLOC);
11275 if (fd < 0)
11276 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011277
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011278 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011279}
11280
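/* Create a 4-slot prog array map (int key/value) used by tests that patch
 * in a tail-call target via fixup_prog.
 */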
11281static int create_prog_array(void)
11282{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011283 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011284
Mickaël Salaünf4874d02017-02-10 00:21:43 +010011285 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011286 sizeof(int), 4, 0);
11287 if (fd < 0)
11288 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011289
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011290 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011291}
11292
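/* Create an ARRAY_OF_MAPS whose inner map is a single-entry int array; the
 * inner map fd is only needed as a template and is closed again before
 * returning the outer map fd.
 */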
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011293static int create_map_in_map(void)
11294{
11295 int inner_map_fd, outer_map_fd;
11296
11297 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
11298 sizeof(int), 1, 0);
11299 if (inner_map_fd < 0) {
11300 printf("Failed to create array '%s'!\n", strerror(errno));
11301 return inner_map_fd;
11302 }
11303
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070011304 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011305 sizeof(int), inner_map_fd, 1, 0);
11306 if (outer_map_fd < 0)
11307 printf("Failed to create array of maps '%s'!\n",
11308 strerror(errno));
11309
11310 close(inner_map_fd);
11311
11312 return outer_map_fd;
11313}
11314
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011315static char bpf_vlog[32768];
11316
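/* Patch the test program before loading: each fixup_* array lists the
 * instruction indices of BPF_LD_MAP_FD(..., 0) placeholders, and the imm
 * field at those indices is overwritten with a freshly created map fd.
 * For example, a test with .fixup_map1 = { 13 } gets prog[13].imm set to
 * the hash map fd created in map_fds[0].
 */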
11317static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011318 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011319{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011320 int *fixup_map1 = test->fixup_map1;
11321 int *fixup_map2 = test->fixup_map2;
11322 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011323 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011324
11325	/* Allocating HTs with 1 elem is fine here, since we only test
11326	 * the verifier and do not perform a runtime lookup, so the only
11327	 * thing that really matters is the value size in this case.
11328 */
11329 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011330 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011331 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011332 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011333 fixup_map1++;
11334 } while (*fixup_map1);
11335 }
11336
11337 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011338 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011339 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011340 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011341 fixup_map2++;
11342 } while (*fixup_map2);
11343 }
11344
11345 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011346 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011347 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011348 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011349 fixup_prog++;
11350 } while (*fixup_prog);
11351 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011352
11353 if (*fixup_map_in_map) {
11354 map_fds[3] = create_map_in_map();
11355 do {
11356 prog[*fixup_map_in_map].imm = map_fds[3];
11357 fixup_map_in_map++;
11358 } while (*fixup_map_in_map);
11359 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011360}
11361
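/* Load a single test program with bpf_verify_program() and compare the
 * outcome against the expected result and error string (using the unpriv
 * variants when running unprivileged). Expected alignment rejections are
 * tolerated on architectures without efficient unaligned access. If the
 * program loaded, it is additionally executed once over a zeroed 64-byte
 * packet via bpf_prog_test_run() and its return value is checked.
 */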
11362static void do_test_single(struct bpf_test *test, bool unpriv,
11363 int *passes, int *errors)
11364{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011365 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011366 struct bpf_insn *prog = test->insns;
11367 int prog_len = probe_filter_length(prog);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011368 char data_in[TEST_DATA_LEN] = {};
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011369 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011370 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011371 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011372 uint32_t retval;
11373 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011374
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011375 for (i = 0; i < MAX_NR_MAPS; i++)
11376 map_fds[i] = -1;
11377
11378 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011379
Daniel Borkmann614d0d72017-05-25 01:05:09 +020011380 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
11381 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020011382 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011383
11384 expected_ret = unpriv && test->result_unpriv != UNDEF ?
11385 test->result_unpriv : test->result;
11386 expected_err = unpriv && test->errstr_unpriv ?
11387 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011388
11389 reject_from_alignment = fd_prog < 0 &&
11390 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
11391 strstr(bpf_vlog, "Unknown alignment.");
11392#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
11393 if (reject_from_alignment) {
11394 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
11395 strerror(errno));
11396 goto fail_log;
11397 }
11398#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011399 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011400 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011401 printf("FAIL\nFailed to load prog '%s'!\n",
11402 strerror(errno));
11403 goto fail_log;
11404 }
11405 } else {
11406 if (fd_prog >= 0) {
11407 printf("FAIL\nUnexpected success to load!\n");
11408 goto fail_log;
11409 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011410 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Joe Stringer95f87a92018-02-14 13:50:34 -080011411 printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
11412 expected_err, bpf_vlog);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011413 goto fail_log;
11414 }
11415 }
11416
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080011417 if (fd_prog >= 0) {
11418 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
11419 NULL, NULL, &retval, NULL);
11420 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
11421 printf("Unexpected bpf_prog_test_run error\n");
11422 goto fail_log;
11423 }
11424 if (!err && retval != test->retval &&
11425 test->retval != POINTER_VALUE) {
11426 printf("FAIL retval %d != %d\n", retval, test->retval);
11427 goto fail_log;
11428 }
11429 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011430 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020011431 printf("OK%s\n", reject_from_alignment ?
11432 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011433close_fds:
11434 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070011435 for (i = 0; i < MAX_NR_MAPS; i++)
11436 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011437 sched_yield();
11438 return;
11439fail_log:
11440 (*errors)++;
11441 printf("%s", bpf_vlog);
11442 goto close_fds;
11443}
11444
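/* Return true when CAP_SYS_ADMIN is in the effective capability set. */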
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011445static bool is_admin(void)
11446{
11447 cap_t caps;
11448 cap_flag_value_t sysadmin = CAP_CLEAR;
11449 const cap_value_t cap_val = CAP_SYS_ADMIN;
11450
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011451#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011452 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
11453 perror("cap_get_flag");
11454 return false;
11455 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080011456#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011457 caps = cap_get_proc();
11458 if (!caps) {
11459 perror("cap_get_proc");
11460 return false;
11461 }
11462 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
11463 perror("cap_get_flag");
11464 if (cap_free(caps))
11465 perror("cap_free");
11466 return (sysadmin == CAP_SET);
11467}
11468
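/* Raise or drop CAP_SYS_ADMIN in the effective set so that a privileged
 * run can temporarily emulate an unprivileged user for the /u test pass.
 */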
11469static int set_admin(bool admin)
11470{
11471 cap_t caps;
11472 const cap_value_t cap_val = CAP_SYS_ADMIN;
11473 int ret = -1;
11474
11475 caps = cap_get_proc();
11476 if (!caps) {
11477 perror("cap_get_proc");
11478 return -1;
11479 }
11480 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
11481 admin ? CAP_SET : CAP_CLEAR)) {
11482 perror("cap_set_flag");
11483 goto out;
11484 }
11485 if (cap_set_proc(caps)) {
11486 perror("cap_set_proc");
11487 goto out;
11488 }
11489 ret = 0;
11490out:
11491 if (cap_free(caps))
11492 perror("cap_free");
11493 return ret;
11494}
11495
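/* Read the kernel.unprivileged_bpf_disabled sysctl to decide whether the
 * unprivileged test pass has to be skipped.
 */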
Joe Stringer0a6748742018-02-14 13:50:36 -080011496static void get_unpriv_disabled(void)
11497{
11498 char buf[2];
11499 FILE *fd;
11500
11501	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) /* sysctl not present: treat unprivileged BPF as enabled */
		return;
11502	if (fgets(buf, 2, fd) == buf && atoi(buf))
11503 unpriv_disabled = true;
11504 fclose(fd);
11505}
11506
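/* Run tests in the range [from, to): each test is executed once as
 * unprivileged (#n/u) where possible and once as privileged (#n/p),
 * and skipped tests are counted separately in the summary.
 */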
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011507static int do_test(bool unpriv, unsigned int from, unsigned int to)
11508{
Joe Stringerd0a0e492018-02-14 13:50:35 -080011509 int i, passes = 0, errors = 0, skips = 0;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011510
11511 for (i = from; i < to; i++) {
11512 struct bpf_test *test = &tests[i];
11513
11514		/* Program types that are not supported by non-root are
11515		 * skipped right away.
11516 */
Joe Stringer0a6748742018-02-14 13:50:36 -080011517 if (!test->prog_type && unpriv_disabled) {
11518 printf("#%d/u %s SKIP\n", i, test->descr);
11519 skips++;
11520 } else if (!test->prog_type) {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011521 if (!unpriv)
11522 set_admin(false);
11523 printf("#%d/u %s ", i, test->descr);
11524 do_test_single(test, true, &passes, &errors);
11525 if (!unpriv)
11526 set_admin(true);
11527 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011528
Joe Stringerd0a0e492018-02-14 13:50:35 -080011529 if (unpriv) {
11530 printf("#%d/p %s SKIP\n", i, test->descr);
11531 skips++;
11532 } else {
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011533 printf("#%d/p %s ", i, test->descr);
11534 do_test_single(test, false, &passes, &errors);
11535 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011536 }
11537
Joe Stringerd0a0e492018-02-14 13:50:35 -080011538 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
11539 skips, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020011540 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011541}
11542
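/* Assuming the binary is built as test_verifier, the argument handling
 * below gives the following invocations (a sketch, not from the source):
 *
 *   ./test_verifier              run all tests
 *   ./test_verifier 42           run only test 42
 *   ./test_verifier 10 20        run tests 10 through 20 inclusive
 */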
11543int main(int argc, char **argv)
11544{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011545 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010011546 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011547
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011548 if (argc == 3) {
11549 unsigned int l = atoi(argv[argc - 2]);
11550 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011551
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011552 if (l < to && u < to) {
11553 from = l;
11554 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011555 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011556 } else if (argc == 2) {
11557 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011558
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011559 if (t < to) {
11560 from = t;
11561 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070011562 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011563 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011564
Joe Stringer0a6748742018-02-14 13:50:36 -080011565 get_unpriv_disabled();
11566 if (unpriv && unpriv_disabled) {
11567 printf("Cannot run as unprivileged user with sysctl %s.\n",
11568 UNPRIV_SYSCTL);
11569 return EXIT_FAILURE;
11570 }
11571
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011572 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070011573}