blob: 6c22edb1f006721b27314e9f83ab3acd31784e36 [file] [log] [blame]
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07001/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08005 * Copyright (c) 2017 Facebook
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public
9 * License as published by the Free Software Foundation.
10 */
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020011
Daniel Borkmann2c460622017-08-04 22:24:41 +020012#include <endian.h>
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080013#include <asm/types.h>
14#include <linux/types.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010015#include <stdint.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070016#include <stdio.h>
Mickaël Salaün702498a2017-02-10 00:21:44 +010017#include <stdlib.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070018#include <unistd.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070019#include <errno.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070020#include <string.h>
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -070021#include <stddef.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070022#include <stdbool.h>
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020023#include <sched.h>
24
Mickaël Salaünd02d8982017-02-10 00:21:37 +010025#include <sys/capability.h>
Alexei Starovoitovbf508872015-10-07 22:23:23 -070026#include <sys/resource.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070027
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020028#include <linux/unistd.h>
29#include <linux/filter.h>
30#include <linux/bpf_perf_event.h>
31#include <linux/bpf.h>
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080032#include <linux/if_ether.h>
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070033
Mickaël Salaün2ee89fb2017-02-10 00:21:38 +010034#include <bpf/bpf.h>
35
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020036#ifdef HAVE_GENHDR
37# include "autoconf.h"
38#else
39# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
40# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
41# endif
42#endif
43
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020044#include "../../../include/linux/filter.h"
45
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020046#ifndef ARRAY_SIZE
47# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
48#endif
49
50#define MAX_INSNS 512
51#define MAX_FIXUPS 8
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070052#define MAX_NR_MAPS 4
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080053#define POINTER_VALUE 0xcafe4all
54#define TEST_DATA_LEN 64
Alexei Starovoitovbf508872015-10-07 22:23:23 -070055
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020056#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
Daniel Borkmann614d0d72017-05-25 01:05:09 +020057#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020058
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070059struct bpf_test {
60 const char *descr;
61 struct bpf_insn insns[MAX_INSNS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020062 int fixup_map1[MAX_FIXUPS];
63 int fixup_map2[MAX_FIXUPS];
64 int fixup_prog[MAX_FIXUPS];
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070065 int fixup_map_in_map[MAX_FIXUPS];
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070066 const char *errstr;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070067 const char *errstr_unpriv;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080068 uint32_t retval;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070069 enum {
Alexei Starovoitovbf508872015-10-07 22:23:23 -070070 UNDEF,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070071 ACCEPT,
72 REJECT
Alexei Starovoitovbf508872015-10-07 22:23:23 -070073 } result, result_unpriv;
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -070074 enum bpf_prog_type prog_type;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020075 uint8_t flags;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070076};
77
Josef Bacik48461132016-09-28 10:54:32 -040078/* Note we want this to be 64 bit aligned so that the end of our array is
79 * actually the end of the structure.
80 */
81#define MAX_ENTRIES 11
Josef Bacik48461132016-09-28 10:54:32 -040082
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020083struct test_val {
84 unsigned int index;
85 int foo[MAX_ENTRIES];
Josef Bacik48461132016-09-28 10:54:32 -040086};
87
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070088static struct bpf_test tests[] = {
89 {
90 "add+sub+mul",
91 .insns = {
92 BPF_MOV64_IMM(BPF_REG_1, 1),
93 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
94 BPF_MOV64_IMM(BPF_REG_2, 3),
95 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
96 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
97 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
98 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
99 BPF_EXIT_INSN(),
100 },
101 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800102 .retval = -3,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700103 },
104 {
105 "unreachable",
106 .insns = {
107 BPF_EXIT_INSN(),
108 BPF_EXIT_INSN(),
109 },
110 .errstr = "unreachable",
111 .result = REJECT,
112 },
113 {
114 "unreachable2",
115 .insns = {
116 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
117 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
118 BPF_EXIT_INSN(),
119 },
120 .errstr = "unreachable",
121 .result = REJECT,
122 },
123 {
124 "out of range jump",
125 .insns = {
126 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
127 BPF_EXIT_INSN(),
128 },
129 .errstr = "jump out of range",
130 .result = REJECT,
131 },
132 {
133 "out of range jump2",
134 .insns = {
135 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
136 BPF_EXIT_INSN(),
137 },
138 .errstr = "jump out of range",
139 .result = REJECT,
140 },
141 {
142 "test1 ld_imm64",
143 .insns = {
144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
145 BPF_LD_IMM64(BPF_REG_0, 0),
146 BPF_LD_IMM64(BPF_REG_0, 0),
147 BPF_LD_IMM64(BPF_REG_0, 1),
148 BPF_LD_IMM64(BPF_REG_0, 1),
149 BPF_MOV64_IMM(BPF_REG_0, 2),
150 BPF_EXIT_INSN(),
151 },
152 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700153 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700154 .result = REJECT,
155 },
156 {
157 "test2 ld_imm64",
158 .insns = {
159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
160 BPF_LD_IMM64(BPF_REG_0, 0),
161 BPF_LD_IMM64(BPF_REG_0, 0),
162 BPF_LD_IMM64(BPF_REG_0, 1),
163 BPF_LD_IMM64(BPF_REG_0, 1),
164 BPF_EXIT_INSN(),
165 },
166 .errstr = "invalid BPF_LD_IMM insn",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700167 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700168 .result = REJECT,
169 },
170 {
171 "test3 ld_imm64",
172 .insns = {
173 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
174 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
175 BPF_LD_IMM64(BPF_REG_0, 0),
176 BPF_LD_IMM64(BPF_REG_0, 0),
177 BPF_LD_IMM64(BPF_REG_0, 1),
178 BPF_LD_IMM64(BPF_REG_0, 1),
179 BPF_EXIT_INSN(),
180 },
181 .errstr = "invalid bpf_ld_imm64 insn",
182 .result = REJECT,
183 },
184 {
185 "test4 ld_imm64",
186 .insns = {
187 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
188 BPF_EXIT_INSN(),
189 },
190 .errstr = "invalid bpf_ld_imm64 insn",
191 .result = REJECT,
192 },
193 {
194 "test5 ld_imm64",
195 .insns = {
196 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
197 },
198 .errstr = "invalid bpf_ld_imm64 insn",
199 .result = REJECT,
200 },
201 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200202 "test6 ld_imm64",
203 .insns = {
204 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
205 BPF_RAW_INSN(0, 0, 0, 0, 0),
206 BPF_EXIT_INSN(),
207 },
208 .result = ACCEPT,
209 },
210 {
211 "test7 ld_imm64",
212 .insns = {
213 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
214 BPF_RAW_INSN(0, 0, 0, 0, 1),
215 BPF_EXIT_INSN(),
216 },
217 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800218 .retval = 1,
Daniel Borkmann728a8532017-04-27 01:39:32 +0200219 },
220 {
221 "test8 ld_imm64",
222 .insns = {
223 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
224 BPF_RAW_INSN(0, 0, 0, 0, 1),
225 BPF_EXIT_INSN(),
226 },
227 .errstr = "uses reserved fields",
228 .result = REJECT,
229 },
230 {
231 "test9 ld_imm64",
232 .insns = {
233 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
234 BPF_RAW_INSN(0, 0, 0, 1, 1),
235 BPF_EXIT_INSN(),
236 },
237 .errstr = "invalid bpf_ld_imm64 insn",
238 .result = REJECT,
239 },
240 {
241 "test10 ld_imm64",
242 .insns = {
243 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
244 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
245 BPF_EXIT_INSN(),
246 },
247 .errstr = "invalid bpf_ld_imm64 insn",
248 .result = REJECT,
249 },
250 {
251 "test11 ld_imm64",
252 .insns = {
253 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
254 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
255 BPF_EXIT_INSN(),
256 },
257 .errstr = "invalid bpf_ld_imm64 insn",
258 .result = REJECT,
259 },
260 {
261 "test12 ld_imm64",
262 .insns = {
263 BPF_MOV64_IMM(BPF_REG_1, 0),
264 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
265 BPF_RAW_INSN(0, 0, 0, 0, 1),
266 BPF_EXIT_INSN(),
267 },
268 .errstr = "not pointing to valid bpf_map",
269 .result = REJECT,
270 },
271 {
272 "test13 ld_imm64",
273 .insns = {
274 BPF_MOV64_IMM(BPF_REG_1, 0),
275 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
276 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
277 BPF_EXIT_INSN(),
278 },
279 .errstr = "invalid bpf_ld_imm64 insn",
280 .result = REJECT,
281 },
282 {
Daniel Borkmann7891a872018-01-10 20:04:37 +0100283 "arsh32 on imm",
284 .insns = {
285 BPF_MOV64_IMM(BPF_REG_0, 1),
286 BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
287 BPF_EXIT_INSN(),
288 },
289 .result = REJECT,
290 .errstr = "BPF_ARSH not supported for 32 bit ALU",
291 },
292 {
293 "arsh32 on reg",
294 .insns = {
295 BPF_MOV64_IMM(BPF_REG_0, 1),
296 BPF_MOV64_IMM(BPF_REG_1, 5),
297 BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
298 BPF_EXIT_INSN(),
299 },
300 .result = REJECT,
301 .errstr = "BPF_ARSH not supported for 32 bit ALU",
302 },
303 {
304 "arsh64 on imm",
305 .insns = {
306 BPF_MOV64_IMM(BPF_REG_0, 1),
307 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
308 BPF_EXIT_INSN(),
309 },
310 .result = ACCEPT,
311 },
312 {
313 "arsh64 on reg",
314 .insns = {
315 BPF_MOV64_IMM(BPF_REG_0, 1),
316 BPF_MOV64_IMM(BPF_REG_1, 5),
317 BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
318 BPF_EXIT_INSN(),
319 },
320 .result = ACCEPT,
321 },
322 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700323 "no bpf_exit",
324 .insns = {
325 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
326 },
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -0800327 .errstr = "not an exit",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700328 .result = REJECT,
329 },
330 {
331 "loop (back-edge)",
332 .insns = {
333 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
334 BPF_EXIT_INSN(),
335 },
336 .errstr = "back-edge",
337 .result = REJECT,
338 },
339 {
340 "loop2 (back-edge)",
341 .insns = {
342 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
343 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
344 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
345 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
346 BPF_EXIT_INSN(),
347 },
348 .errstr = "back-edge",
349 .result = REJECT,
350 },
351 {
352 "conditional loop",
353 .insns = {
354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
355 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
356 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
357 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
358 BPF_EXIT_INSN(),
359 },
360 .errstr = "back-edge",
361 .result = REJECT,
362 },
363 {
364 "read uninitialized register",
365 .insns = {
366 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
367 BPF_EXIT_INSN(),
368 },
369 .errstr = "R2 !read_ok",
370 .result = REJECT,
371 },
372 {
373 "read invalid register",
374 .insns = {
375 BPF_MOV64_REG(BPF_REG_0, -1),
376 BPF_EXIT_INSN(),
377 },
378 .errstr = "R15 is invalid",
379 .result = REJECT,
380 },
381 {
382 "program doesn't init R0 before exit",
383 .insns = {
384 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
385 BPF_EXIT_INSN(),
386 },
387 .errstr = "R0 !read_ok",
388 .result = REJECT,
389 },
390 {
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700391 "program doesn't init R0 before exit in all branches",
392 .insns = {
393 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
394 BPF_MOV64_IMM(BPF_REG_0, 1),
395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
396 BPF_EXIT_INSN(),
397 },
398 .errstr = "R0 !read_ok",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700399 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov32bf08a2014-10-20 14:54:57 -0700400 .result = REJECT,
401 },
402 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700403 "stack out of bounds",
404 .insns = {
405 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
406 BPF_EXIT_INSN(),
407 },
408 .errstr = "invalid stack",
409 .result = REJECT,
410 },
411 {
412 "invalid call insn1",
413 .insns = {
414 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
415 BPF_EXIT_INSN(),
416 },
417 .errstr = "BPF_CALL uses reserved",
418 .result = REJECT,
419 },
420 {
421 "invalid call insn2",
422 .insns = {
423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
424 BPF_EXIT_INSN(),
425 },
426 .errstr = "BPF_CALL uses reserved",
427 .result = REJECT,
428 },
429 {
430 "invalid function call",
431 .insns = {
432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
433 BPF_EXIT_INSN(),
434 },
Daniel Borkmanne00c7b22016-11-26 01:28:09 +0100435 .errstr = "invalid func unknown#1234567",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700436 .result = REJECT,
437 },
438 {
439 "uninitialized stack1",
440 .insns = {
441 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
443 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700446 BPF_EXIT_INSN(),
447 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200448 .fixup_map1 = { 2 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700449 .errstr = "invalid indirect read from stack",
450 .result = REJECT,
451 },
452 {
453 "uninitialized stack2",
454 .insns = {
455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
456 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
457 BPF_EXIT_INSN(),
458 },
459 .errstr = "invalid read from stack",
460 .result = REJECT,
461 },
462 {
Daniel Borkmann728a8532017-04-27 01:39:32 +0200463 "invalid fp arithmetic",
464 /* If this gets ever changed, make sure JITs can deal with it. */
465 .insns = {
466 BPF_MOV64_IMM(BPF_REG_0, 0),
467 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
468 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
469 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
470 BPF_EXIT_INSN(),
471 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -0800472 .errstr = "R1 subtraction from stack pointer",
Daniel Borkmann728a8532017-04-27 01:39:32 +0200473 .result = REJECT,
474 },
475 {
476 "non-invalid fp arithmetic",
477 .insns = {
478 BPF_MOV64_IMM(BPF_REG_0, 0),
479 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
480 BPF_EXIT_INSN(),
481 },
482 .result = ACCEPT,
483 },
484 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200485 "invalid argument register",
486 .insns = {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
488 BPF_FUNC_get_cgroup_classid),
489 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
490 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200491 BPF_EXIT_INSN(),
492 },
493 .errstr = "R1 !read_ok",
494 .result = REJECT,
495 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
496 },
497 {
498 "non-invalid argument register",
499 .insns = {
500 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200501 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
502 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200503 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200504 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
505 BPF_FUNC_get_cgroup_classid),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +0200506 BPF_EXIT_INSN(),
507 },
508 .result = ACCEPT,
509 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
510 },
511 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700512 "check valid spill/fill",
513 .insns = {
514 /* spill R1(ctx) into stack */
515 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700516 /* fill it back into R2 */
517 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700518 /* should be able to access R0 = *(R2 + 8) */
Daniel Borkmannf91fe172015-03-01 12:31:41 +0100519 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
520 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700521 BPF_EXIT_INSN(),
522 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700523 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700524 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700525 .result_unpriv = REJECT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800526 .retval = POINTER_VALUE,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700527 },
528 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +0200529 "check valid spill/fill, skb mark",
530 .insns = {
531 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
532 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
534 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
535 offsetof(struct __sk_buff, mark)),
536 BPF_EXIT_INSN(),
537 },
538 .result = ACCEPT,
539 .result_unpriv = ACCEPT,
540 },
541 {
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700542 "check corrupted spill/fill",
543 .insns = {
544 /* spill R1(ctx) into stack */
545 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700546 /* mess up with R1 pointer on stack */
547 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700548 /* fill back into R0 should fail */
549 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700550 BPF_EXIT_INSN(),
551 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700552 .errstr_unpriv = "attempt to corrupt spilled",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700553 .errstr = "corrupted spill",
554 .result = REJECT,
555 },
556 {
557 "invalid src register in STX",
558 .insns = {
559 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
560 BPF_EXIT_INSN(),
561 },
562 .errstr = "R15 is invalid",
563 .result = REJECT,
564 },
565 {
566 "invalid dst register in STX",
567 .insns = {
568 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
569 BPF_EXIT_INSN(),
570 },
571 .errstr = "R14 is invalid",
572 .result = REJECT,
573 },
574 {
575 "invalid dst register in ST",
576 .insns = {
577 BPF_ST_MEM(BPF_B, 14, -1, -1),
578 BPF_EXIT_INSN(),
579 },
580 .errstr = "R14 is invalid",
581 .result = REJECT,
582 },
583 {
584 "invalid src register in LDX",
585 .insns = {
586 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
587 BPF_EXIT_INSN(),
588 },
589 .errstr = "R12 is invalid",
590 .result = REJECT,
591 },
592 {
593 "invalid dst register in LDX",
594 .insns = {
595 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
596 BPF_EXIT_INSN(),
597 },
598 .errstr = "R11 is invalid",
599 .result = REJECT,
600 },
601 {
602 "junk insn",
603 .insns = {
604 BPF_RAW_INSN(0, 0, 0, 0, 0),
605 BPF_EXIT_INSN(),
606 },
607 .errstr = "invalid BPF_LD_IMM",
608 .result = REJECT,
609 },
610 {
611 "junk insn2",
612 .insns = {
613 BPF_RAW_INSN(1, 0, 0, 0, 0),
614 BPF_EXIT_INSN(),
615 },
616 .errstr = "BPF_LDX uses reserved fields",
617 .result = REJECT,
618 },
619 {
620 "junk insn3",
621 .insns = {
622 BPF_RAW_INSN(-1, 0, 0, 0, 0),
623 BPF_EXIT_INSN(),
624 },
625 .errstr = "invalid BPF_ALU opcode f0",
626 .result = REJECT,
627 },
628 {
629 "junk insn4",
630 .insns = {
631 BPF_RAW_INSN(-1, -1, -1, -1, -1),
632 BPF_EXIT_INSN(),
633 },
634 .errstr = "invalid BPF_ALU opcode f0",
635 .result = REJECT,
636 },
637 {
638 "junk insn5",
639 .insns = {
640 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
641 BPF_EXIT_INSN(),
642 },
643 .errstr = "BPF_ALU uses reserved fields",
644 .result = REJECT,
645 },
646 {
647 "misaligned read from stack",
648 .insns = {
649 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
650 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
651 BPF_EXIT_INSN(),
652 },
Edward Creef65b1842017-08-07 15:27:12 +0100653 .errstr = "misaligned stack access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700654 .result = REJECT,
655 },
656 {
657 "invalid map_fd for function call",
658 .insns = {
659 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
660 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
662 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200663 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
664 BPF_FUNC_map_delete_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700665 BPF_EXIT_INSN(),
666 },
667 .errstr = "fd 0 is not pointing to valid bpf_map",
668 .result = REJECT,
669 },
670 {
671 "don't check return value before access",
672 .insns = {
673 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
674 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
676 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200677 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
678 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700679 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
680 BPF_EXIT_INSN(),
681 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200682 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700683 .errstr = "R0 invalid mem access 'map_value_or_null'",
684 .result = REJECT,
685 },
686 {
687 "access memory with incorrect alignment",
688 .insns = {
689 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
690 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
692 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200693 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
694 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700695 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
696 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
697 BPF_EXIT_INSN(),
698 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200699 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +0100700 .errstr = "misaligned value access",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700701 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +0100702 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700703 },
704 {
705 "sometimes access memory with incorrect alignment",
706 .insns = {
707 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
708 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
710 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200711 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
712 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
714 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
715 BPF_EXIT_INSN(),
716 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
717 BPF_EXIT_INSN(),
718 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200719 .fixup_map1 = { 3 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700720 .errstr = "R0 invalid mem access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700721 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700722 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +0100723 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -0700724 },
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700725 {
726 "jump test 1",
727 .insns = {
728 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
729 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
731 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
732 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
733 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
735 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
737 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
738 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
739 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
741 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
742 BPF_MOV64_IMM(BPF_REG_0, 0),
743 BPF_EXIT_INSN(),
744 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700745 .errstr_unpriv = "R1 pointer comparison",
746 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700747 .result = ACCEPT,
748 },
749 {
750 "jump test 2",
751 .insns = {
752 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
753 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
754 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
755 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
757 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
758 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
760 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
761 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
763 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
764 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
765 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
766 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
767 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
768 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
769 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
770 BPF_MOV64_IMM(BPF_REG_0, 0),
771 BPF_EXIT_INSN(),
772 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700773 .errstr_unpriv = "R1 pointer comparison",
774 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700775 .result = ACCEPT,
776 },
777 {
778 "jump test 3",
779 .insns = {
780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
782 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
784 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
786 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
787 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
788 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
790 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
792 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
794 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
796 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
798 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
800 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
802 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
804 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200805 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
806 BPF_FUNC_map_delete_elem),
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700807 BPF_EXIT_INSN(),
808 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200809 .fixup_map1 = { 24 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700810 .errstr_unpriv = "R1 pointer comparison",
811 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700812 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -0800813 .retval = -ENOENT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700814 },
815 {
816 "jump test 4",
817 .insns = {
818 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
820 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
821 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
823 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
824 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
825 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
828 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
829 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
832 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
833 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
834 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
835 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
837 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
839 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
840 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
845 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
846 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
850 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
851 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
852 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
853 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
855 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
856 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
857 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
858 BPF_MOV64_IMM(BPF_REG_0, 0),
859 BPF_EXIT_INSN(),
860 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700861 .errstr_unpriv = "R1 pointer comparison",
862 .result_unpriv = REJECT,
Alexei Starovoitovfd10c2e2014-09-29 18:50:02 -0700863 .result = ACCEPT,
864 },
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700865 {
866 "jump test 5",
867 .insns = {
868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
869 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
870 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
871 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
872 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
873 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
874 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
875 BPF_MOV64_IMM(BPF_REG_0, 0),
876 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
877 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
878 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
879 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
880 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
881 BPF_MOV64_IMM(BPF_REG_0, 0),
882 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
883 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
884 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
885 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
886 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
887 BPF_MOV64_IMM(BPF_REG_0, 0),
888 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
889 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
890 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
891 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
892 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
893 BPF_MOV64_IMM(BPF_REG_0, 0),
894 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
895 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
896 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
897 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
898 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
899 BPF_MOV64_IMM(BPF_REG_0, 0),
900 BPF_EXIT_INSN(),
901 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700902 .errstr_unpriv = "R1 pointer comparison",
903 .result_unpriv = REJECT,
Alexei Starovoitov342ded42014-10-28 15:11:42 -0700904 .result = ACCEPT,
905 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700906 {
907 "access skb fields ok",
908 .insns = {
909 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
910 offsetof(struct __sk_buff, len)),
911 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
912 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, mark)),
914 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
915 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
916 offsetof(struct __sk_buff, pkt_type)),
917 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
918 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
919 offsetof(struct __sk_buff, queue_mapping)),
920 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitovc2497392015-03-16 18:06:02 -0700921 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
922 offsetof(struct __sk_buff, protocol)),
923 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
924 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
925 offsetof(struct __sk_buff, vlan_present)),
926 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
927 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
928 offsetof(struct __sk_buff, vlan_tci)),
929 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Daniel Borkmannb1d9fc42017-04-19 23:01:17 +0200930 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
931 offsetof(struct __sk_buff, napi_id)),
932 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700933 BPF_EXIT_INSN(),
934 },
935 .result = ACCEPT,
936 },
937 {
938 "access skb fields bad1",
939 .insns = {
940 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
941 BPF_EXIT_INSN(),
942 },
943 .errstr = "invalid bpf_context access",
944 .result = REJECT,
945 },
946 {
947 "access skb fields bad2",
948 .insns = {
949 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
953 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200954 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
955 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700956 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
957 BPF_EXIT_INSN(),
958 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
959 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
960 offsetof(struct __sk_buff, pkt_type)),
961 BPF_EXIT_INSN(),
962 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200963 .fixup_map1 = { 4 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700964 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700965 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700966 .result = REJECT,
967 },
968 {
969 "access skb fields bad3",
970 .insns = {
971 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
972 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
973 offsetof(struct __sk_buff, pkt_type)),
974 BPF_EXIT_INSN(),
975 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
976 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
978 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200979 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
980 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700981 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
982 BPF_EXIT_INSN(),
983 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
984 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
985 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +0200986 .fixup_map1 = { 6 },
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700987 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -0700988 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov614cd3b2015-03-13 11:57:43 -0700989 .result = REJECT,
990 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -0700991 {
992 "access skb fields bad4",
993 .insns = {
994 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
995 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
996 offsetof(struct __sk_buff, len)),
997 BPF_MOV64_IMM(BPF_REG_0, 0),
998 BPF_EXIT_INSN(),
999 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1000 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1002 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001003 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1004 BPF_FUNC_map_lookup_elem),
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001005 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1006 BPF_EXIT_INSN(),
1007 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1008 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1009 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001010 .fixup_map1 = { 7 },
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001011 .errstr = "different pointers",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001012 .errstr_unpriv = "R1 pointer comparison",
Alexei Starovoitov725f9dc2015-04-15 16:19:33 -07001013 .result = REJECT,
1014 },
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001015 {
John Fastabend41bc94f2017-08-15 22:33:56 -07001016 "invalid access __sk_buff family",
1017 .insns = {
1018 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1019 offsetof(struct __sk_buff, family)),
1020 BPF_EXIT_INSN(),
1021 },
1022 .errstr = "invalid bpf_context access",
1023 .result = REJECT,
1024 },
1025 {
1026 "invalid access __sk_buff remote_ip4",
1027 .insns = {
1028 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1029 offsetof(struct __sk_buff, remote_ip4)),
1030 BPF_EXIT_INSN(),
1031 },
1032 .errstr = "invalid bpf_context access",
1033 .result = REJECT,
1034 },
1035 {
1036 "invalid access __sk_buff local_ip4",
1037 .insns = {
1038 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1039 offsetof(struct __sk_buff, local_ip4)),
1040 BPF_EXIT_INSN(),
1041 },
1042 .errstr = "invalid bpf_context access",
1043 .result = REJECT,
1044 },
1045 {
1046 "invalid access __sk_buff remote_ip6",
1047 .insns = {
1048 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1049 offsetof(struct __sk_buff, remote_ip6)),
1050 BPF_EXIT_INSN(),
1051 },
1052 .errstr = "invalid bpf_context access",
1053 .result = REJECT,
1054 },
1055 {
1056 "invalid access __sk_buff local_ip6",
1057 .insns = {
1058 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1059 offsetof(struct __sk_buff, local_ip6)),
1060 BPF_EXIT_INSN(),
1061 },
1062 .errstr = "invalid bpf_context access",
1063 .result = REJECT,
1064 },
1065 {
1066 "invalid access __sk_buff remote_port",
1067 .insns = {
1068 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1069 offsetof(struct __sk_buff, remote_port)),
1070 BPF_EXIT_INSN(),
1071 },
1072 .errstr = "invalid bpf_context access",
1073 .result = REJECT,
1074 },
1075 {
1076 "invalid access __sk_buff remote_port",
1077 .insns = {
1078 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1079 offsetof(struct __sk_buff, local_port)),
1080 BPF_EXIT_INSN(),
1081 },
1082 .errstr = "invalid bpf_context access",
1083 .result = REJECT,
1084 },
1085 {
1086 "valid access __sk_buff family",
1087 .insns = {
1088 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1089 offsetof(struct __sk_buff, family)),
1090 BPF_EXIT_INSN(),
1091 },
1092 .result = ACCEPT,
1093 .prog_type = BPF_PROG_TYPE_SK_SKB,
1094 },
1095 {
1096 "valid access __sk_buff remote_ip4",
1097 .insns = {
1098 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1099 offsetof(struct __sk_buff, remote_ip4)),
1100 BPF_EXIT_INSN(),
1101 },
1102 .result = ACCEPT,
1103 .prog_type = BPF_PROG_TYPE_SK_SKB,
1104 },
1105 {
1106 "valid access __sk_buff local_ip4",
1107 .insns = {
1108 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1109 offsetof(struct __sk_buff, local_ip4)),
1110 BPF_EXIT_INSN(),
1111 },
1112 .result = ACCEPT,
1113 .prog_type = BPF_PROG_TYPE_SK_SKB,
1114 },
1115 {
1116 "valid access __sk_buff remote_ip6",
1117 .insns = {
1118 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1119 offsetof(struct __sk_buff, remote_ip6[0])),
1120 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1121 offsetof(struct __sk_buff, remote_ip6[1])),
1122 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1123 offsetof(struct __sk_buff, remote_ip6[2])),
1124 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1125 offsetof(struct __sk_buff, remote_ip6[3])),
1126 BPF_EXIT_INSN(),
1127 },
1128 .result = ACCEPT,
1129 .prog_type = BPF_PROG_TYPE_SK_SKB,
1130 },
1131 {
1132 "valid access __sk_buff local_ip6",
1133 .insns = {
1134 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1135 offsetof(struct __sk_buff, local_ip6[0])),
1136 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1137 offsetof(struct __sk_buff, local_ip6[1])),
1138 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1139 offsetof(struct __sk_buff, local_ip6[2])),
1140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1141 offsetof(struct __sk_buff, local_ip6[3])),
1142 BPF_EXIT_INSN(),
1143 },
1144 .result = ACCEPT,
1145 .prog_type = BPF_PROG_TYPE_SK_SKB,
1146 },
1147 {
1148 "valid access __sk_buff remote_port",
1149 .insns = {
1150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1151 offsetof(struct __sk_buff, remote_port)),
1152 BPF_EXIT_INSN(),
1153 },
1154 .result = ACCEPT,
1155 .prog_type = BPF_PROG_TYPE_SK_SKB,
1156 },
1157 {
1158 "valid access __sk_buff remote_port",
1159 .insns = {
1160 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1161 offsetof(struct __sk_buff, local_port)),
1162 BPF_EXIT_INSN(),
1163 },
1164 .result = ACCEPT,
1165 .prog_type = BPF_PROG_TYPE_SK_SKB,
1166 },
1167 {
John Fastabended850542017-08-28 07:11:24 -07001168 "invalid access of tc_classid for SK_SKB",
1169 .insns = {
1170 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1171 offsetof(struct __sk_buff, tc_classid)),
1172 BPF_EXIT_INSN(),
1173 },
1174 .result = REJECT,
1175 .prog_type = BPF_PROG_TYPE_SK_SKB,
1176 .errstr = "invalid bpf_context access",
1177 },
1178 {
John Fastabendf7e9cb12017-10-18 07:10:58 -07001179 "invalid access of skb->mark for SK_SKB",
1180 .insns = {
1181 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1182 offsetof(struct __sk_buff, mark)),
1183 BPF_EXIT_INSN(),
1184 },
1185 .result = REJECT,
1186 .prog_type = BPF_PROG_TYPE_SK_SKB,
1187 .errstr = "invalid bpf_context access",
1188 },
1189 {
1190 "check skb->mark is not writeable by SK_SKB",
John Fastabended850542017-08-28 07:11:24 -07001191 .insns = {
1192 BPF_MOV64_IMM(BPF_REG_0, 0),
1193 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1194 offsetof(struct __sk_buff, mark)),
1195 BPF_EXIT_INSN(),
1196 },
John Fastabendf7e9cb12017-10-18 07:10:58 -07001197 .result = REJECT,
John Fastabended850542017-08-28 07:11:24 -07001198 .prog_type = BPF_PROG_TYPE_SK_SKB,
John Fastabendf7e9cb12017-10-18 07:10:58 -07001199 .errstr = "invalid bpf_context access",
John Fastabended850542017-08-28 07:11:24 -07001200 },
1201 {
1202 "check skb->tc_index is writeable by SK_SKB",
1203 .insns = {
1204 BPF_MOV64_IMM(BPF_REG_0, 0),
1205 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1206 offsetof(struct __sk_buff, tc_index)),
1207 BPF_EXIT_INSN(),
1208 },
1209 .result = ACCEPT,
1210 .prog_type = BPF_PROG_TYPE_SK_SKB,
1211 },
1212 {
1213 "check skb->priority is writeable by SK_SKB",
1214 .insns = {
1215 BPF_MOV64_IMM(BPF_REG_0, 0),
1216 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1217 offsetof(struct __sk_buff, priority)),
1218 BPF_EXIT_INSN(),
1219 },
1220 .result = ACCEPT,
1221 .prog_type = BPF_PROG_TYPE_SK_SKB,
1222 },
1223 {
1224 "direct packet read for SK_SKB",
1225 .insns = {
1226 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1227 offsetof(struct __sk_buff, data)),
1228 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1229 offsetof(struct __sk_buff, data_end)),
1230 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1232 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1233 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1234 BPF_MOV64_IMM(BPF_REG_0, 0),
1235 BPF_EXIT_INSN(),
1236 },
1237 .result = ACCEPT,
1238 .prog_type = BPF_PROG_TYPE_SK_SKB,
1239 },
1240 {
1241 "direct packet write for SK_SKB",
1242 .insns = {
1243 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1244 offsetof(struct __sk_buff, data)),
1245 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1246 offsetof(struct __sk_buff, data_end)),
1247 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1249 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1250 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1251 BPF_MOV64_IMM(BPF_REG_0, 0),
1252 BPF_EXIT_INSN(),
1253 },
1254 .result = ACCEPT,
1255 .prog_type = BPF_PROG_TYPE_SK_SKB,
1256 },
1257 {
1258 "overlapping checks for direct packet access SK_SKB",
1259 .insns = {
1260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1261 offsetof(struct __sk_buff, data)),
1262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1263 offsetof(struct __sk_buff, data_end)),
1264 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1266 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1269 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1270 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1271 BPF_MOV64_IMM(BPF_REG_0, 0),
1272 BPF_EXIT_INSN(),
1273 },
1274 .result = ACCEPT,
1275 .prog_type = BPF_PROG_TYPE_SK_SKB,
1276 },
1277 {
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001278 "check skb->mark is not writeable by sockets",
1279 .insns = {
1280 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1281 offsetof(struct __sk_buff, mark)),
1282 BPF_EXIT_INSN(),
1283 },
1284 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001285 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001286 .result = REJECT,
1287 },
1288 {
1289 "check skb->tc_index is not writeable by sockets",
1290 .insns = {
1291 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1292 offsetof(struct __sk_buff, tc_index)),
1293 BPF_EXIT_INSN(),
1294 },
1295 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001296 .errstr_unpriv = "R1 leaks addr",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001297 .result = REJECT,
1298 },
1299 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001300 "check cb access: byte",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001301 .insns = {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001302 BPF_MOV64_IMM(BPF_REG_0, 0),
1303 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1304 offsetof(struct __sk_buff, cb[0])),
1305 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1306 offsetof(struct __sk_buff, cb[0]) + 1),
1307 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1308 offsetof(struct __sk_buff, cb[0]) + 2),
1309 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1310 offsetof(struct __sk_buff, cb[0]) + 3),
1311 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1312 offsetof(struct __sk_buff, cb[1])),
1313 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1314 offsetof(struct __sk_buff, cb[1]) + 1),
1315 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1316 offsetof(struct __sk_buff, cb[1]) + 2),
1317 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1318 offsetof(struct __sk_buff, cb[1]) + 3),
1319 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1320 offsetof(struct __sk_buff, cb[2])),
1321 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1322 offsetof(struct __sk_buff, cb[2]) + 1),
1323 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1324 offsetof(struct __sk_buff, cb[2]) + 2),
1325 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1326 offsetof(struct __sk_buff, cb[2]) + 3),
1327 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1328 offsetof(struct __sk_buff, cb[3])),
1329 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1330 offsetof(struct __sk_buff, cb[3]) + 1),
1331 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1332 offsetof(struct __sk_buff, cb[3]) + 2),
1333 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1334 offsetof(struct __sk_buff, cb[3]) + 3),
1335 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1336 offsetof(struct __sk_buff, cb[4])),
1337 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1338 offsetof(struct __sk_buff, cb[4]) + 1),
1339 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1340 offsetof(struct __sk_buff, cb[4]) + 2),
1341 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1342 offsetof(struct __sk_buff, cb[4]) + 3),
1343 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1344 offsetof(struct __sk_buff, cb[0])),
1345 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1346 offsetof(struct __sk_buff, cb[0]) + 1),
1347 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1348 offsetof(struct __sk_buff, cb[0]) + 2),
1349 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1350 offsetof(struct __sk_buff, cb[0]) + 3),
1351 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1352 offsetof(struct __sk_buff, cb[1])),
1353 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1354 offsetof(struct __sk_buff, cb[1]) + 1),
1355 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1356 offsetof(struct __sk_buff, cb[1]) + 2),
1357 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1358 offsetof(struct __sk_buff, cb[1]) + 3),
1359 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1360 offsetof(struct __sk_buff, cb[2])),
1361 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1362 offsetof(struct __sk_buff, cb[2]) + 1),
1363 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1364 offsetof(struct __sk_buff, cb[2]) + 2),
1365 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1366 offsetof(struct __sk_buff, cb[2]) + 3),
1367 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1368 offsetof(struct __sk_buff, cb[3])),
1369 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1370 offsetof(struct __sk_buff, cb[3]) + 1),
1371 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1372 offsetof(struct __sk_buff, cb[3]) + 2),
1373 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1374 offsetof(struct __sk_buff, cb[3]) + 3),
1375 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1376 offsetof(struct __sk_buff, cb[4])),
1377 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1378 offsetof(struct __sk_buff, cb[4]) + 1),
1379 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1380 offsetof(struct __sk_buff, cb[4]) + 2),
1381 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1382 offsetof(struct __sk_buff, cb[4]) + 3),
1383 BPF_EXIT_INSN(),
1384 },
1385 .result = ACCEPT,
1386 },
1387 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001388 "__sk_buff->hash, offset 0, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001389 .insns = {
1390 BPF_MOV64_IMM(BPF_REG_0, 0),
1391 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001392 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001393 BPF_EXIT_INSN(),
1394 },
1395 .errstr = "invalid bpf_context access",
1396 .result = REJECT,
1397 },
1398 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001399 "__sk_buff->tc_index, offset 3, byte store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001400 .insns = {
1401 BPF_MOV64_IMM(BPF_REG_0, 0),
1402 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001403 offsetof(struct __sk_buff, tc_index) + 3),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001404 BPF_EXIT_INSN(),
1405 },
1406 .errstr = "invalid bpf_context access",
1407 .result = REJECT,
1408 },
1409 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001410 "check skb->hash byte load permitted",
1411 .insns = {
1412 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001413#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001414 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1415 offsetof(struct __sk_buff, hash)),
1416#else
1417 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1418 offsetof(struct __sk_buff, hash) + 3),
1419#endif
1420 BPF_EXIT_INSN(),
1421 },
1422 .result = ACCEPT,
1423 },
1424 {
1425 "check skb->hash byte load not permitted 1",
1426 .insns = {
1427 BPF_MOV64_IMM(BPF_REG_0, 0),
1428 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1429 offsetof(struct __sk_buff, hash) + 1),
1430 BPF_EXIT_INSN(),
1431 },
1432 .errstr = "invalid bpf_context access",
1433 .result = REJECT,
1434 },
1435 {
1436 "check skb->hash byte load not permitted 2",
1437 .insns = {
1438 BPF_MOV64_IMM(BPF_REG_0, 0),
1439 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1440 offsetof(struct __sk_buff, hash) + 2),
1441 BPF_EXIT_INSN(),
1442 },
1443 .errstr = "invalid bpf_context access",
1444 .result = REJECT,
1445 },
1446 {
1447 "check skb->hash byte load not permitted 3",
1448 .insns = {
1449 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001450#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001451 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1452 offsetof(struct __sk_buff, hash) + 3),
1453#else
1454 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1455 offsetof(struct __sk_buff, hash)),
1456#endif
1457 BPF_EXIT_INSN(),
1458 },
1459 .errstr = "invalid bpf_context access",
1460 .result = REJECT,
1461 },
1462 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001463 "check cb access: byte, wrong type",
1464 .insns = {
1465 BPF_MOV64_IMM(BPF_REG_0, 0),
1466 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001467 offsetof(struct __sk_buff, cb[0])),
1468 BPF_EXIT_INSN(),
1469 },
1470 .errstr = "invalid bpf_context access",
1471 .result = REJECT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001472 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1473 },
1474 {
1475 "check cb access: half",
1476 .insns = {
1477 BPF_MOV64_IMM(BPF_REG_0, 0),
1478 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1479 offsetof(struct __sk_buff, cb[0])),
1480 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1481 offsetof(struct __sk_buff, cb[0]) + 2),
1482 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1483 offsetof(struct __sk_buff, cb[1])),
1484 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1485 offsetof(struct __sk_buff, cb[1]) + 2),
1486 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1487 offsetof(struct __sk_buff, cb[2])),
1488 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1489 offsetof(struct __sk_buff, cb[2]) + 2),
1490 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1491 offsetof(struct __sk_buff, cb[3])),
1492 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1493 offsetof(struct __sk_buff, cb[3]) + 2),
1494 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1495 offsetof(struct __sk_buff, cb[4])),
1496 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1497 offsetof(struct __sk_buff, cb[4]) + 2),
1498 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1499 offsetof(struct __sk_buff, cb[0])),
1500 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1501 offsetof(struct __sk_buff, cb[0]) + 2),
1502 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1503 offsetof(struct __sk_buff, cb[1])),
1504 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1505 offsetof(struct __sk_buff, cb[1]) + 2),
1506 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1507 offsetof(struct __sk_buff, cb[2])),
1508 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1509 offsetof(struct __sk_buff, cb[2]) + 2),
1510 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1511 offsetof(struct __sk_buff, cb[3])),
1512 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1513 offsetof(struct __sk_buff, cb[3]) + 2),
1514 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1515 offsetof(struct __sk_buff, cb[4])),
1516 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1517 offsetof(struct __sk_buff, cb[4]) + 2),
1518 BPF_EXIT_INSN(),
1519 },
1520 .result = ACCEPT,
1521 },
1522 {
1523 "check cb access: half, unaligned",
1524 .insns = {
1525 BPF_MOV64_IMM(BPF_REG_0, 0),
1526 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1527 offsetof(struct __sk_buff, cb[0]) + 1),
1528 BPF_EXIT_INSN(),
1529 },
Edward Creef65b1842017-08-07 15:27:12 +01001530 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001531 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001532 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001533 },
1534 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001535 "check __sk_buff->hash, offset 0, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001536 .insns = {
1537 BPF_MOV64_IMM(BPF_REG_0, 0),
1538 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001539 offsetof(struct __sk_buff, hash)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001540 BPF_EXIT_INSN(),
1541 },
1542 .errstr = "invalid bpf_context access",
1543 .result = REJECT,
1544 },
1545 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001546 "check __sk_buff->tc_index, offset 2, half store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001547 .insns = {
1548 BPF_MOV64_IMM(BPF_REG_0, 0),
1549 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
Yonghong Song31fd8582017-06-13 15:52:13 -07001550 offsetof(struct __sk_buff, tc_index) + 2),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001551 BPF_EXIT_INSN(),
1552 },
1553 .errstr = "invalid bpf_context access",
1554 .result = REJECT,
1555 },
1556 {
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001557 "check skb->hash half load permitted",
1558 .insns = {
1559 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001560#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001561 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1562 offsetof(struct __sk_buff, hash)),
1563#else
1564 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1565 offsetof(struct __sk_buff, hash) + 2),
1566#endif
1567 BPF_EXIT_INSN(),
1568 },
1569 .result = ACCEPT,
1570 },
1571 {
1572 "check skb->hash half load not permitted",
1573 .insns = {
1574 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02001575#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07001576 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1577 offsetof(struct __sk_buff, hash) + 2),
1578#else
1579 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1580 offsetof(struct __sk_buff, hash)),
1581#endif
1582 BPF_EXIT_INSN(),
1583 },
1584 .errstr = "invalid bpf_context access",
1585 .result = REJECT,
1586 },
1587 {
Daniel Borkmann62c79892017-01-12 11:51:33 +01001588 "check cb access: half, wrong type",
1589 .insns = {
1590 BPF_MOV64_IMM(BPF_REG_0, 0),
1591 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1592 offsetof(struct __sk_buff, cb[0])),
1593 BPF_EXIT_INSN(),
1594 },
1595 .errstr = "invalid bpf_context access",
1596 .result = REJECT,
1597 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1598 },
1599 {
1600 "check cb access: word",
1601 .insns = {
1602 BPF_MOV64_IMM(BPF_REG_0, 0),
1603 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1604 offsetof(struct __sk_buff, cb[0])),
1605 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1606 offsetof(struct __sk_buff, cb[1])),
1607 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1608 offsetof(struct __sk_buff, cb[2])),
1609 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1610 offsetof(struct __sk_buff, cb[3])),
1611 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1612 offsetof(struct __sk_buff, cb[4])),
1613 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1614 offsetof(struct __sk_buff, cb[0])),
1615 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1616 offsetof(struct __sk_buff, cb[1])),
1617 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1618 offsetof(struct __sk_buff, cb[2])),
1619 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1620 offsetof(struct __sk_buff, cb[3])),
1621 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1622 offsetof(struct __sk_buff, cb[4])),
1623 BPF_EXIT_INSN(),
1624 },
1625 .result = ACCEPT,
1626 },
1627 {
1628 "check cb access: word, unaligned 1",
1629 .insns = {
1630 BPF_MOV64_IMM(BPF_REG_0, 0),
1631 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1632 offsetof(struct __sk_buff, cb[0]) + 2),
1633 BPF_EXIT_INSN(),
1634 },
Edward Creef65b1842017-08-07 15:27:12 +01001635 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001636 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001637 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001638 },
1639 {
1640 "check cb access: word, unaligned 2",
1641 .insns = {
1642 BPF_MOV64_IMM(BPF_REG_0, 0),
1643 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1644 offsetof(struct __sk_buff, cb[4]) + 1),
1645 BPF_EXIT_INSN(),
1646 },
Edward Creef65b1842017-08-07 15:27:12 +01001647 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001648 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001649 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001650 },
1651 {
1652 "check cb access: word, unaligned 3",
1653 .insns = {
1654 BPF_MOV64_IMM(BPF_REG_0, 0),
1655 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1656 offsetof(struct __sk_buff, cb[4]) + 2),
1657 BPF_EXIT_INSN(),
1658 },
Edward Creef65b1842017-08-07 15:27:12 +01001659 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001660 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001661 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001662 },
1663 {
1664 "check cb access: word, unaligned 4",
1665 .insns = {
1666 BPF_MOV64_IMM(BPF_REG_0, 0),
1667 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1668 offsetof(struct __sk_buff, cb[4]) + 3),
1669 BPF_EXIT_INSN(),
1670 },
Edward Creef65b1842017-08-07 15:27:12 +01001671 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001672 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001673 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001674 },
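	/*
	 * Double-word (BPF_DW) access to cb[]: with five 32-bit slots, an
	 * 8-byte access only fits cleanly at cb[0] and cb[2].  Starting at
	 * cb[1] or cb[3] is misaligned, and starting at cb[4] runs past the
	 * end of the array, which is what the "unaligned" and "oob" cases
	 * below seem to be exercising.
	 */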
1675 {
1676 "check cb access: double",
1677 .insns = {
1678 BPF_MOV64_IMM(BPF_REG_0, 0),
1679 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1680 offsetof(struct __sk_buff, cb[0])),
1681 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1682 offsetof(struct __sk_buff, cb[2])),
1683 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[0])),
1685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1686 offsetof(struct __sk_buff, cb[2])),
1687 BPF_EXIT_INSN(),
1688 },
1689 .result = ACCEPT,
1690 },
1691 {
1692 "check cb access: double, unaligned 1",
1693 .insns = {
1694 BPF_MOV64_IMM(BPF_REG_0, 0),
1695 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1696 offsetof(struct __sk_buff, cb[1])),
1697 BPF_EXIT_INSN(),
1698 },
Edward Creef65b1842017-08-07 15:27:12 +01001699 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001700 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001701 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001702 },
1703 {
1704 "check cb access: double, unaligned 2",
1705 .insns = {
1706 BPF_MOV64_IMM(BPF_REG_0, 0),
1707 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1708 offsetof(struct __sk_buff, cb[3])),
1709 BPF_EXIT_INSN(),
1710 },
Edward Creef65b1842017-08-07 15:27:12 +01001711 .errstr = "misaligned context access",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001712 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001713 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmann62c79892017-01-12 11:51:33 +01001714 },
1715 {
1716 "check cb access: double, oob 1",
1717 .insns = {
1718 BPF_MOV64_IMM(BPF_REG_0, 0),
1719 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1720 offsetof(struct __sk_buff, cb[4])),
1721 BPF_EXIT_INSN(),
1722 },
1723 .errstr = "invalid bpf_context access",
1724 .result = REJECT,
1725 },
1726 {
1727 "check cb access: double, oob 2",
1728 .insns = {
1729 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001730 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1731 offsetof(struct __sk_buff, cb[4])),
1732 BPF_EXIT_INSN(),
1733 },
1734 .errstr = "invalid bpf_context access",
1735 .result = REJECT,
1736 },
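	/*
	 * __sk_buff->ifindex is a single 32-bit field, so an 8-byte load or
	 * store at its offset necessarily spills into the neighbouring
	 * field and is expected to be refused as an invalid context access.
	 */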
1737 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001738 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001739 .insns = {
1740 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07001741 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1742 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001743 BPF_EXIT_INSN(),
1744 },
1745 .errstr = "invalid bpf_context access",
1746 .result = REJECT,
1747 },
1748 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001749 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001750 .insns = {
1751 BPF_MOV64_IMM(BPF_REG_0, 0),
1752 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07001753 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001754 BPF_EXIT_INSN(),
1755 },
1756 .errstr = "invalid bpf_context access",
1757 .result = REJECT,
1758 },
1759 {
1760 "check cb access: double, wrong type",
1761 .insns = {
1762 BPF_MOV64_IMM(BPF_REG_0, 0),
1763 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1764 offsetof(struct __sk_buff, cb[0])),
1765 BPF_EXIT_INSN(),
1766 },
1767 .errstr = "invalid bpf_context access",
1768 .result = REJECT,
1769 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001770 },
1771 {
1772 "check out of range skb->cb access",
1773 .insns = {
1774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001775 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001776 BPF_EXIT_INSN(),
1777 },
1778 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001779 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001780 .result = REJECT,
1781 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1782 },
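	/*
	 * The two "write skb fields" tests contrast program types and
	 * privilege: storing R1 (the context pointer) into cb[] is fine for
	 * a privileged loader but rejected for an unprivileged one as a
	 * pointer leak, and the tc_cls_act variant also writes mark and
	 * tc_index, which is accepted for that program type but still
	 * refused for an unprivileged loader.
	 */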
1783 {
1784 "write skb fields from socket prog",
1785 .insns = {
1786 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1787 offsetof(struct __sk_buff, cb[4])),
1788 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1789 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1790 offsetof(struct __sk_buff, mark)),
1791 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1792 offsetof(struct __sk_buff, tc_index)),
1793 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1794 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1795 offsetof(struct __sk_buff, cb[0])),
1796 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1797 offsetof(struct __sk_buff, cb[2])),
1798 BPF_EXIT_INSN(),
1799 },
1800 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001801 .errstr_unpriv = "R1 leaks addr",
1802 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001803 },
1804 {
1805 "write skb fields from tc_cls_act prog",
1806 .insns = {
1807 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1808 offsetof(struct __sk_buff, cb[0])),
1809 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1810 offsetof(struct __sk_buff, mark)),
1811 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1812 offsetof(struct __sk_buff, tc_index)),
1813 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1814 offsetof(struct __sk_buff, tc_index)),
1815 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1816 offsetof(struct __sk_buff, cb[3])),
1817 BPF_EXIT_INSN(),
1818 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001819 .errstr_unpriv = "",
1820 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001821 .result = ACCEPT,
1822 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1823 },
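	/*
	 * PTR_TO_STACK store/load: a value written through a pointer
	 * derived from the frame pointer must be readable back, and the
	 * .retval field lets the harness confirm that the loaded
	 * 0xfaceb00c actually reaches R0.  The rejected variants probe
	 * misaligned offsets and accesses outside the stack bounds,
	 * matching the offsets quoted in their errstr strings.
	 */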
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001824 {
1825 "PTR_TO_STACK store/load",
1826 .insns = {
1827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1829 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1830 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1831 BPF_EXIT_INSN(),
1832 },
1833 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001834 .retval = 0xfaceb00c,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001835 },
1836 {
1837 "PTR_TO_STACK store/load - bad alignment on off",
1838 .insns = {
1839 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1841 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1842 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1843 BPF_EXIT_INSN(),
1844 },
1845 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001846 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001847 },
1848 {
1849 "PTR_TO_STACK store/load - bad alignment on reg",
1850 .insns = {
1851 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1853 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1854 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1855 BPF_EXIT_INSN(),
1856 },
1857 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001858 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001859 },
1860 {
1861 "PTR_TO_STACK store/load - out of bounds low",
1862 .insns = {
1863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1865 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1866 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1867 BPF_EXIT_INSN(),
1868 },
1869 .result = REJECT,
1870 .errstr = "invalid stack off=-79992 size=8",
1871 },
1872 {
1873 "PTR_TO_STACK store/load - out of bounds high",
1874 .insns = {
1875 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1877 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1878 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1879 BPF_EXIT_INSN(),
1880 },
1881 .result = REJECT,
1882 .errstr = "invalid stack off=0 size=8",
1883 },
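	/*
	 * The "unpriv:" tests appear to be exercised both with and without
	 * privilege: result/errstr describe the privileged run, while
	 * result_unpriv/errstr_unpriv describe the unprivileged one.  The
	 * recurring theme is pointer confinement: returning, comparing,
	 * negating or otherwise leaking pointers is tolerated for root but
	 * must be rejected for an unprivileged loader.
	 */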
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001884 {
1885 "unpriv: return pointer",
1886 .insns = {
1887 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1888 BPF_EXIT_INSN(),
1889 },
1890 .result = ACCEPT,
1891 .result_unpriv = REJECT,
1892 .errstr_unpriv = "R0 leaks addr",
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08001893 .retval = POINTER_VALUE,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001894 },
1895 {
1896 "unpriv: add const to pointer",
1897 .insns = {
1898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1899 BPF_MOV64_IMM(BPF_REG_0, 0),
1900 BPF_EXIT_INSN(),
1901 },
1902 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001903 },
1904 {
1905 "unpriv: add pointer to pointer",
1906 .insns = {
1907 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1908 BPF_MOV64_IMM(BPF_REG_0, 0),
1909 BPF_EXIT_INSN(),
1910 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08001911 .result = REJECT,
1912 .errstr = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001913 },
1914 {
1915 "unpriv: neg pointer",
1916 .insns = {
1917 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1918 BPF_MOV64_IMM(BPF_REG_0, 0),
1919 BPF_EXIT_INSN(),
1920 },
1921 .result = ACCEPT,
1922 .result_unpriv = REJECT,
1923 .errstr_unpriv = "R1 pointer arithmetic",
1924 },
1925 {
1926 "unpriv: cmp pointer with const",
1927 .insns = {
1928 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1929 BPF_MOV64_IMM(BPF_REG_0, 0),
1930 BPF_EXIT_INSN(),
1931 },
1932 .result = ACCEPT,
1933 .result_unpriv = REJECT,
1934 .errstr_unpriv = "R1 pointer comparison",
1935 },
1936 {
1937 "unpriv: cmp pointer with pointer",
1938 .insns = {
1939 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1940 BPF_MOV64_IMM(BPF_REG_0, 0),
1941 BPF_EXIT_INSN(),
1942 },
1943 .result = ACCEPT,
1944 .result_unpriv = REJECT,
1945 .errstr_unpriv = "R10 pointer comparison",
1946 },
1947 {
1948 "unpriv: check that printk is disallowed",
1949 .insns = {
1950 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1953 BPF_MOV64_IMM(BPF_REG_2, 8),
1954 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1956 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001957 BPF_MOV64_IMM(BPF_REG_0, 0),
1958 BPF_EXIT_INSN(),
1959 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001960 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001961 .result_unpriv = REJECT,
1962 .result = ACCEPT,
1963 },
1964 {
1965 "unpriv: pass pointer to helper function",
1966 .insns = {
1967 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1970 BPF_LD_MAP_FD(BPF_REG_1, 0),
1971 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1972 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001973 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1974 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001975 BPF_MOV64_IMM(BPF_REG_0, 0),
1976 BPF_EXIT_INSN(),
1977 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001978 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001979 .errstr_unpriv = "R4 leaks addr",
1980 .result_unpriv = REJECT,
1981 .result = ACCEPT,
1982 },
1983 {
1984 "unpriv: indirectly pass pointer on stack to helper function",
1985 .insns = {
1986 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1987 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1989 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1991 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001992 BPF_MOV64_IMM(BPF_REG_0, 0),
1993 BPF_EXIT_INSN(),
1994 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001995 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001996 .errstr = "invalid indirect read from stack off -8+0 size 8",
1997 .result = REJECT,
1998 },
1999 {
2000 "unpriv: mangle pointer on stack 1",
2001 .insns = {
2002 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2003 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2004 BPF_MOV64_IMM(BPF_REG_0, 0),
2005 BPF_EXIT_INSN(),
2006 },
2007 .errstr_unpriv = "attempt to corrupt spilled",
2008 .result_unpriv = REJECT,
2009 .result = ACCEPT,
2010 },
2011 {
2012 "unpriv: mangle pointer on stack 2",
2013 .insns = {
2014 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2015 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2016 BPF_MOV64_IMM(BPF_REG_0, 0),
2017 BPF_EXIT_INSN(),
2018 },
2019 .errstr_unpriv = "attempt to corrupt spilled",
2020 .result_unpriv = REJECT,
2021 .result = ACCEPT,
2022 },
2023 {
2024 "unpriv: read pointer from stack in small chunks",
2025 .insns = {
2026 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2027 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2028 BPF_MOV64_IMM(BPF_REG_0, 0),
2029 BPF_EXIT_INSN(),
2030 },
2031 .errstr = "invalid size",
2032 .result = REJECT,
2033 },
2034 {
2035 "unpriv: write pointer into ctx",
2036 .insns = {
2037 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2038 BPF_MOV64_IMM(BPF_REG_0, 0),
2039 BPF_EXIT_INSN(),
2040 },
2041 .errstr_unpriv = "R1 leaks addr",
2042 .result_unpriv = REJECT,
2043 .errstr = "invalid bpf_context access",
2044 .result = REJECT,
2045 },
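	/*
	 * Spill/fill of the context pointer: the verifier has to remember
	 * that the 8-byte stack slot holds PTR_TO_CTX so the reloaded value
	 * can still be dereferenced or passed to a helper.  Cases 3 and 4
	 * clobber the slot (overwrite it with the frame pointer, or xadd on
	 * it) before the fill, so the reload comes back as fp/inv and the
	 * subsequent helper call must fail the type check.
	 */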
2046 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002047 "unpriv: spill/fill of ctx",
2048 .insns = {
2049 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2051 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2052 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2053 BPF_MOV64_IMM(BPF_REG_0, 0),
2054 BPF_EXIT_INSN(),
2055 },
2056 .result = ACCEPT,
2057 },
2058 {
2059 "unpriv: spill/fill of ctx 2",
2060 .insns = {
2061 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2063 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2064 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2066 BPF_FUNC_get_hash_recalc),
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002067 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002068 BPF_EXIT_INSN(),
2069 },
2070 .result = ACCEPT,
2071 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2072 },
2073 {
2074 "unpriv: spill/fill of ctx 3",
2075 .insns = {
2076 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2078 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2079 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2080 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2082 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002083 BPF_EXIT_INSN(),
2084 },
2085 .result = REJECT,
2086 .errstr = "R1 type=fp expected=ctx",
2087 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2088 },
2089 {
2090 "unpriv: spill/fill of ctx 4",
2091 .insns = {
2092 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2093 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2094 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2095 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002096 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2097 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002098 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002099 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2100 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002101 BPF_EXIT_INSN(),
2102 },
2103 .result = REJECT,
2104 .errstr = "R1 type=inv expected=ctx",
2105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2106 },
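	/*
	 * A single load/store instruction that can be reached with
	 * different pointer types in the same register (stack pointer on
	 * one path, context or perf-event context pointer on the other)
	 * cannot be rewritten consistently, so the verifier is expected to
	 * refuse it with "same insn cannot be used with different pointers".
	 */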
2107 {
2108 "unpriv: spill/fill of different pointers stx",
2109 .insns = {
2110 BPF_MOV64_IMM(BPF_REG_3, 42),
2111 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2116 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2117 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2118 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2119 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2120 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2121 offsetof(struct __sk_buff, mark)),
2122 BPF_MOV64_IMM(BPF_REG_0, 0),
2123 BPF_EXIT_INSN(),
2124 },
2125 .result = REJECT,
2126 .errstr = "same insn cannot be used with different pointers",
2127 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2128 },
2129 {
2130 "unpriv: spill/fill of different pointers ldx",
2131 .insns = {
2132 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2134 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2135 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2137 -(__s32)offsetof(struct bpf_perf_event_data,
2138 sample_period) - 8),
2139 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2140 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2141 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2142 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2143 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2144 offsetof(struct bpf_perf_event_data,
2145 sample_period)),
2146 BPF_MOV64_IMM(BPF_REG_0, 0),
2147 BPF_EXIT_INSN(),
2148 },
2149 .result = REJECT,
2150 .errstr = "same insn cannot be used with different pointers",
2151 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2152 },
2153 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002154 "unpriv: write pointer into map elem value",
2155 .insns = {
2156 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2157 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2159 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002160 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2161 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2163 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2164 BPF_EXIT_INSN(),
2165 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002166 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002167 .errstr_unpriv = "R0 leaks addr",
2168 .result_unpriv = REJECT,
2169 .result = ACCEPT,
2170 },
2171 {
2172 "unpriv: partial copy of pointer",
2173 .insns = {
2174 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2175 BPF_MOV64_IMM(BPF_REG_0, 0),
2176 BPF_EXIT_INSN(),
2177 },
2178 .errstr_unpriv = "R10 partial copy",
2179 .result_unpriv = REJECT,
2180 .result = ACCEPT,
2181 },
2182 {
2183 "unpriv: pass pointer to tail_call",
2184 .insns = {
2185 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2186 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2188 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002189 BPF_MOV64_IMM(BPF_REG_0, 0),
2190 BPF_EXIT_INSN(),
2191 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002192 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002193 .errstr_unpriv = "R3 leaks addr into helper",
2194 .result_unpriv = REJECT,
2195 .result = ACCEPT,
2196 },
2197 {
2198 "unpriv: cmp map pointer with zero",
2199 .insns = {
2200 BPF_MOV64_IMM(BPF_REG_1, 0),
2201 BPF_LD_MAP_FD(BPF_REG_1, 0),
2202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2203 BPF_MOV64_IMM(BPF_REG_0, 0),
2204 BPF_EXIT_INSN(),
2205 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002206 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002207 .errstr_unpriv = "R1 pointer comparison",
2208 .result_unpriv = REJECT,
2209 .result = ACCEPT,
2210 },
2211 {
2212 "unpriv: write into frame pointer",
2213 .insns = {
2214 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2215 BPF_MOV64_IMM(BPF_REG_0, 0),
2216 BPF_EXIT_INSN(),
2217 },
2218 .errstr = "frame pointer is read only",
2219 .result = REJECT,
2220 },
2221 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002222 "unpriv: spill/fill frame pointer",
2223 .insns = {
2224 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2226 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2227 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2228 BPF_MOV64_IMM(BPF_REG_0, 0),
2229 BPF_EXIT_INSN(),
2230 },
2231 .errstr = "frame pointer is read only",
2232 .result = REJECT,
2233 },
2234 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002235 "unpriv: cmp of frame pointer",
2236 .insns = {
2237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
2241 .errstr_unpriv = "R10 pointer comparison",
2242 .result_unpriv = REJECT,
2243 .result = ACCEPT,
2244 },
2245 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002246 "unpriv: adding of fp",
2247 .insns = {
2248 BPF_MOV64_IMM(BPF_REG_0, 0),
2249 BPF_MOV64_IMM(BPF_REG_1, 0),
2250 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2251 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2252 BPF_EXIT_INSN(),
2253 },
Edward Creef65b1842017-08-07 15:27:12 +01002254 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002255 },
2256 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002257 "unpriv: cmp of stack pointer",
2258 .insns = {
2259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2261 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2262 BPF_MOV64_IMM(BPF_REG_0, 0),
2263 BPF_EXIT_INSN(),
2264 },
2265 .errstr_unpriv = "R2 pointer comparison",
2266 .result_unpriv = REJECT,
2267 .result = ACCEPT,
2268 },
2269 {
Yonghong Song332270f2017-04-29 22:52:42 -07002270 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002271 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002272 BPF_MOV64_IMM(BPF_REG_1, 4),
2273 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2274 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2275 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2277 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2278 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2279 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2280 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2282 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002283 BPF_MOV64_IMM(BPF_REG_0, 0),
2284 BPF_EXIT_INSN(),
2285 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002286 .result = ACCEPT,
2287 },
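	/*
	 * The raw_stack tests hand a stack buffer to bpf_skb_load_bytes().
	 * Because the helper fills the buffer itself, passing uninitialized
	 * stack is fine ("no init"), but the buffer must stay inside the
	 * 512-byte stack and the length in R4 must be a known, positive,
	 * bounded value; each rejected variant violates one of those rules.
	 */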
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002288 {
2289 "raw_stack: no skb_load_bytes",
2290 .insns = {
2291 BPF_MOV64_IMM(BPF_REG_2, 4),
2292 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2294 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2295 BPF_MOV64_IMM(BPF_REG_4, 8),
2296 /* Call to skb_load_bytes() omitted. */
2297 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2298 BPF_EXIT_INSN(),
2299 },
2300 .result = REJECT,
2301 .errstr = "invalid read from stack off -8+0 size 8",
2302 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2303 },
2304 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002305 "raw_stack: skb_load_bytes, negative len",
2306 .insns = {
2307 BPF_MOV64_IMM(BPF_REG_2, 4),
2308 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2310 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2311 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2313 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002314 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2315 BPF_EXIT_INSN(),
2316 },
2317 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002318 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002319 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2320 },
2321 {
2322 "raw_stack: skb_load_bytes, negative len 2",
2323 .insns = {
2324 BPF_MOV64_IMM(BPF_REG_2, 4),
2325 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2327 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2328 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2330 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002331 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2332 BPF_EXIT_INSN(),
2333 },
2334 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002335 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002336 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2337 },
2338 {
2339 "raw_stack: skb_load_bytes, zero len",
2340 .insns = {
2341 BPF_MOV64_IMM(BPF_REG_2, 4),
2342 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2344 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2345 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2347 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002348 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2349 BPF_EXIT_INSN(),
2350 },
2351 .result = REJECT,
2352 .errstr = "invalid stack type R3",
2353 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2354 },
2355 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002356 "raw_stack: skb_load_bytes, no init",
2357 .insns = {
2358 BPF_MOV64_IMM(BPF_REG_2, 4),
2359 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2361 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2362 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2364 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002365 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2366 BPF_EXIT_INSN(),
2367 },
2368 .result = ACCEPT,
2369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2370 },
2371 {
2372 "raw_stack: skb_load_bytes, init",
2373 .insns = {
2374 BPF_MOV64_IMM(BPF_REG_2, 4),
2375 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2377 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2378 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2379 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2381 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002382 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2383 BPF_EXIT_INSN(),
2384 },
2385 .result = ACCEPT,
2386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2387 },
2388 {
2389 "raw_stack: skb_load_bytes, spilled regs around bounds",
2390 .insns = {
2391 BPF_MOV64_IMM(BPF_REG_2, 4),
2392 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002394 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2395 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002396 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2397 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2399 BPF_FUNC_skb_load_bytes),
2400 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2401 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002402 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2403 offsetof(struct __sk_buff, mark)),
2404 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2405 offsetof(struct __sk_buff, priority)),
2406 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2407 BPF_EXIT_INSN(),
2408 },
2409 .result = ACCEPT,
2410 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2411 },
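	/*
	 * If the helper's write window overlaps a stack slot holding a
	 * spilled pointer, that spill can no longer be trusted: the slot
	 * degrades to an unknown scalar, so reloading it and dereferencing
	 * the result must be rejected with an 'inv' memory access error,
	 * as the two "corruption" tests below check.
	 */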
2412 {
2413 "raw_stack: skb_load_bytes, spilled regs corruption",
2414 .insns = {
2415 BPF_MOV64_IMM(BPF_REG_2, 4),
2416 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002418 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002419 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2420 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2422 BPF_FUNC_skb_load_bytes),
2423 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002424 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2425 offsetof(struct __sk_buff, mark)),
2426 BPF_EXIT_INSN(),
2427 },
2428 .result = REJECT,
2429 .errstr = "R0 invalid mem access 'inv'",
2430 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2431 },
2432 {
2433 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2434 .insns = {
2435 BPF_MOV64_IMM(BPF_REG_2, 4),
2436 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002438 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2439 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2440 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002441 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2442 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2444 BPF_FUNC_skb_load_bytes),
2445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2446 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2447 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002448 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2449 offsetof(struct __sk_buff, mark)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2451 offsetof(struct __sk_buff, priority)),
2452 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2453 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2454 offsetof(struct __sk_buff, pkt_type)),
2455 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2456 BPF_EXIT_INSN(),
2457 },
2458 .result = REJECT,
2459 .errstr = "R3 invalid mem access 'inv'",
2460 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2461 },
2462 {
2463 "raw_stack: skb_load_bytes, spilled regs + data",
2464 .insns = {
2465 BPF_MOV64_IMM(BPF_REG_2, 4),
2466 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002468 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2469 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2470 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002471 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2472 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002473 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2474 BPF_FUNC_skb_load_bytes),
2475 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2476 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2477 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2479 offsetof(struct __sk_buff, mark)),
2480 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2481 offsetof(struct __sk_buff, priority)),
2482 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2483 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2484 BPF_EXIT_INSN(),
2485 },
2486 .result = ACCEPT,
2487 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2488 },
2489 {
2490 "raw_stack: skb_load_bytes, invalid access 1",
2491 .insns = {
2492 BPF_MOV64_IMM(BPF_REG_2, 4),
2493 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2495 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2496 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2498 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002499 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2500 BPF_EXIT_INSN(),
2501 },
2502 .result = REJECT,
2503 .errstr = "invalid stack type R3 off=-513 access_size=8",
2504 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2505 },
2506 {
2507 "raw_stack: skb_load_bytes, invalid access 2",
2508 .insns = {
2509 BPF_MOV64_IMM(BPF_REG_2, 4),
2510 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2511 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2512 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2513 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2515 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002516 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2517 BPF_EXIT_INSN(),
2518 },
2519 .result = REJECT,
2520 .errstr = "invalid stack type R3 off=-1 access_size=8",
2521 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2522 },
2523 {
2524 "raw_stack: skb_load_bytes, invalid access 3",
2525 .insns = {
2526 BPF_MOV64_IMM(BPF_REG_2, 4),
2527 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2529 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2530 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002531 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2532 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2534 BPF_EXIT_INSN(),
2535 },
2536 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002537 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002538 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2539 },
2540 {
2541 "raw_stack: skb_load_bytes, invalid access 4",
2542 .insns = {
2543 BPF_MOV64_IMM(BPF_REG_2, 4),
2544 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2545 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2546 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2547 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2549 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002550 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2551 BPF_EXIT_INSN(),
2552 },
2553 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002554 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002555 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2556 },
2557 {
2558 "raw_stack: skb_load_bytes, invalid access 5",
2559 .insns = {
2560 BPF_MOV64_IMM(BPF_REG_2, 4),
2561 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2563 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2564 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002565 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2566 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002567 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2568 BPF_EXIT_INSN(),
2569 },
2570 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002571 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002572 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2573 },
2574 {
2575 "raw_stack: skb_load_bytes, invalid access 6",
2576 .insns = {
2577 BPF_MOV64_IMM(BPF_REG_2, 4),
2578 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2579 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2580 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2581 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002582 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2583 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002584 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2585 BPF_EXIT_INSN(),
2586 },
2587 .result = REJECT,
2588 .errstr = "invalid stack type R3 off=-512 access_size=0",
2589 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2590 },
2591 {
2592 "raw_stack: skb_load_bytes, large access",
2593 .insns = {
2594 BPF_MOV64_IMM(BPF_REG_2, 4),
2595 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2597 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2598 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2600 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002601 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2602 BPF_EXIT_INSN(),
2603 },
2604 .result = ACCEPT,
2605 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2606 },
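	/*
	 * Direct packet access: programs read skb->data and skb->data_end
	 * from the context and must prove, with an explicit bounds check,
	 * that every packet load or store stays below data_end.  Roughly,
	 * the C-level pattern these tests encode is:
	 *
	 *	void *data = (void *)(long)skb->data;
	 *	void *end  = (void *)(long)skb->data_end;
	 *	if (data + 8 > end)
	 *		return 0;
	 *	... = *(u8 *)data;
	 *
	 * The variants cover both comparison directions, writes, pointer
	 * arithmetic with constants and with masked registers, and misuse
	 * such as touching the packet without any check at all.
	 */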
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002607 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002608 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002609 .insns = {
2610 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2611 offsetof(struct __sk_buff, data)),
2612 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2613 offsetof(struct __sk_buff, data_end)),
2614 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2616 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2617 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2618 BPF_MOV64_IMM(BPF_REG_0, 0),
2619 BPF_EXIT_INSN(),
2620 },
2621 .result = ACCEPT,
2622 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2623 },
2624 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002625 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002626 .insns = {
2627 BPF_MOV64_IMM(BPF_REG_0, 1),
2628 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2629 offsetof(struct __sk_buff, data_end)),
2630 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2631 offsetof(struct __sk_buff, data)),
2632 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2634 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2635 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2636 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2637 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2638 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2639 offsetof(struct __sk_buff, data)),
2640 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2642 offsetof(struct __sk_buff, len)),
Edward Cree1f9ab382017-08-07 15:29:11 +01002643 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2644 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002645 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2648 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2649 offsetof(struct __sk_buff, data_end)),
2650 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2651 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2652 BPF_MOV64_IMM(BPF_REG_0, 0),
2653 BPF_EXIT_INSN(),
2654 },
2655 .result = ACCEPT,
2656 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2657 },
2658 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002659 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002660 .insns = {
2661 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2662 offsetof(struct __sk_buff, data)),
2663 BPF_MOV64_IMM(BPF_REG_0, 0),
2664 BPF_EXIT_INSN(),
2665 },
2666 .errstr = "invalid bpf_context access off=76",
2667 .result = REJECT,
2668 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2669 },
2670 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002671 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002672 .insns = {
2673 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2674 offsetof(struct __sk_buff, data)),
2675 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2676 offsetof(struct __sk_buff, data_end)),
2677 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2679 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2680 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2681 BPF_MOV64_IMM(BPF_REG_0, 0),
2682 BPF_EXIT_INSN(),
2683 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002684 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002685 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2686 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002687 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002688 "direct packet access: test5 (pkt_end >= reg, good access)",
2689 .insns = {
2690 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2691 offsetof(struct __sk_buff, data)),
2692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2693 offsetof(struct __sk_buff, data_end)),
2694 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2695 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2696 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2697 BPF_MOV64_IMM(BPF_REG_0, 1),
2698 BPF_EXIT_INSN(),
2699 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2700 BPF_MOV64_IMM(BPF_REG_0, 0),
2701 BPF_EXIT_INSN(),
2702 },
2703 .result = ACCEPT,
2704 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2705 },
2706 {
2707 "direct packet access: test6 (pkt_end >= reg, bad access)",
2708 .insns = {
2709 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2710 offsetof(struct __sk_buff, data)),
2711 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2712 offsetof(struct __sk_buff, data_end)),
2713 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2715 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2716 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2717 BPF_MOV64_IMM(BPF_REG_0, 1),
2718 BPF_EXIT_INSN(),
2719 BPF_MOV64_IMM(BPF_REG_0, 0),
2720 BPF_EXIT_INSN(),
2721 },
2722 .errstr = "invalid access to packet",
2723 .result = REJECT,
2724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2725 },
2726 {
2727 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2728 .insns = {
2729 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2730 offsetof(struct __sk_buff, data)),
2731 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2732 offsetof(struct __sk_buff, data_end)),
2733 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2734 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2735 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2736 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2737 BPF_MOV64_IMM(BPF_REG_0, 1),
2738 BPF_EXIT_INSN(),
2739 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2740 BPF_MOV64_IMM(BPF_REG_0, 0),
2741 BPF_EXIT_INSN(),
2742 },
2743 .errstr = "invalid access to packet",
2744 .result = REJECT,
2745 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2746 },
2747 {
2748 "direct packet access: test8 (double test, variant 1)",
2749 .insns = {
2750 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2751 offsetof(struct __sk_buff, data)),
2752 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2753 offsetof(struct __sk_buff, data_end)),
2754 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2756 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2757 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2758 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2759 BPF_MOV64_IMM(BPF_REG_0, 1),
2760 BPF_EXIT_INSN(),
2761 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2762 BPF_MOV64_IMM(BPF_REG_0, 0),
2763 BPF_EXIT_INSN(),
2764 },
2765 .result = ACCEPT,
2766 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2767 },
2768 {
2769 "direct packet access: test9 (double test, variant 2)",
2770 .insns = {
2771 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2772 offsetof(struct __sk_buff, data)),
2773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2774 offsetof(struct __sk_buff, data_end)),
2775 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2777 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2778 BPF_MOV64_IMM(BPF_REG_0, 1),
2779 BPF_EXIT_INSN(),
2780 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2781 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2782 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2783 BPF_MOV64_IMM(BPF_REG_0, 0),
2784 BPF_EXIT_INSN(),
2785 },
2786 .result = ACCEPT,
2787 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2788 },
2789 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002790 "direct packet access: test10 (write invalid)",
2791 .insns = {
2792 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2793 offsetof(struct __sk_buff, data)),
2794 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2795 offsetof(struct __sk_buff, data_end)),
2796 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2798 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2799 BPF_MOV64_IMM(BPF_REG_0, 0),
2800 BPF_EXIT_INSN(),
2801 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2802 BPF_MOV64_IMM(BPF_REG_0, 0),
2803 BPF_EXIT_INSN(),
2804 },
2805 .errstr = "invalid access to packet",
2806 .result = REJECT,
2807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2808 },
2809 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002810 "direct packet access: test11 (shift, good access)",
2811 .insns = {
2812 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2813 offsetof(struct __sk_buff, data)),
2814 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2815 offsetof(struct __sk_buff, data_end)),
2816 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2818 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2819 BPF_MOV64_IMM(BPF_REG_3, 144),
2820 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2821 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2822 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2823 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2824 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2825 BPF_MOV64_IMM(BPF_REG_0, 1),
2826 BPF_EXIT_INSN(),
2827 BPF_MOV64_IMM(BPF_REG_0, 0),
2828 BPF_EXIT_INSN(),
2829 },
2830 .result = ACCEPT,
2831 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002832 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002833 },
2834 {
2835 "direct packet access: test12 (and, good access)",
2836 .insns = {
2837 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2838 offsetof(struct __sk_buff, data)),
2839 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2840 offsetof(struct __sk_buff, data_end)),
2841 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2842 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2843 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2844 BPF_MOV64_IMM(BPF_REG_3, 144),
2845 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2846 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2847 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2848 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2849 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2850 BPF_MOV64_IMM(BPF_REG_0, 1),
2851 BPF_EXIT_INSN(),
2852 BPF_MOV64_IMM(BPF_REG_0, 0),
2853 BPF_EXIT_INSN(),
2854 },
2855 .result = ACCEPT,
2856 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002857 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002858 },
2859 {
2860 "direct packet access: test13 (branches, good access)",
2861 .insns = {
2862 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2863 offsetof(struct __sk_buff, data)),
2864 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2865 offsetof(struct __sk_buff, data_end)),
2866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2868 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2869 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2870 offsetof(struct __sk_buff, mark)),
2871 BPF_MOV64_IMM(BPF_REG_4, 1),
2872 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2873 BPF_MOV64_IMM(BPF_REG_3, 14),
2874 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2875 BPF_MOV64_IMM(BPF_REG_3, 24),
2876 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2878 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2879 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2880 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2881 BPF_MOV64_IMM(BPF_REG_0, 1),
2882 BPF_EXIT_INSN(),
2883 BPF_MOV64_IMM(BPF_REG_0, 0),
2884 BPF_EXIT_INSN(),
2885 },
2886 .result = ACCEPT,
2887 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002888 .retval = 1,
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002889 },
2890 {
William Tu63dfef72017-02-04 08:37:29 -08002891 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2892 .insns = {
2893 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2894 offsetof(struct __sk_buff, data)),
2895 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2896 offsetof(struct __sk_buff, data_end)),
2897 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2899 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2900 BPF_MOV64_IMM(BPF_REG_5, 12),
2901 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2902 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2903 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2904 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2905 BPF_MOV64_IMM(BPF_REG_0, 1),
2906 BPF_EXIT_INSN(),
2907 BPF_MOV64_IMM(BPF_REG_0, 0),
2908 BPF_EXIT_INSN(),
2909 },
2910 .result = ACCEPT,
2911 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08002912 .retval = 1,
William Tu63dfef72017-02-04 08:37:29 -08002913 },
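	/*
	 * test15/test16: an XADD on the slot holding a spilled packet
	 * pointer turns the spill into an unknown scalar, so reloading and
	 * dereferencing it must fail; and data_end itself is not a pointer
	 * the program may do arithmetic on.
	 */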
2914 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002915 "direct packet access: test15 (spill with xadd)",
2916 .insns = {
2917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2918 offsetof(struct __sk_buff, data)),
2919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2920 offsetof(struct __sk_buff, data_end)),
2921 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2923 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2924 BPF_MOV64_IMM(BPF_REG_5, 4096),
2925 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2927 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2928 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2929 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2930 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2931 BPF_MOV64_IMM(BPF_REG_0, 0),
2932 BPF_EXIT_INSN(),
2933 },
2934 .errstr = "R2 invalid mem access 'inv'",
2935 .result = REJECT,
2936 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2937 },
2938 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002939 "direct packet access: test16 (arith on data_end)",
2940 .insns = {
2941 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2942 offsetof(struct __sk_buff, data)),
2943 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2944 offsetof(struct __sk_buff, data_end)),
2945 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2946 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2947 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2948 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2949 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2950 BPF_MOV64_IMM(BPF_REG_0, 0),
2951 BPF_EXIT_INSN(),
2952 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08002953 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
Daniel Borkmann728a8532017-04-27 01:39:32 +02002954 .result = REJECT,
2955 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2956 },
2957 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002958 "direct packet access: test17 (pruning, alignment)",
2959 .insns = {
2960 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2961 offsetof(struct __sk_buff, data)),
2962 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2963 offsetof(struct __sk_buff, data_end)),
2964 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2965 offsetof(struct __sk_buff, mark)),
2966 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2968 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2969 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2970 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2971 BPF_MOV64_IMM(BPF_REG_0, 0),
2972 BPF_EXIT_INSN(),
2973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2974 BPF_JMP_A(-6),
2975 },
Edward Creef65b1842017-08-07 15:27:12 +01002976 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002977 .result = REJECT,
2978 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2979 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2980 },
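	/*
	 * Tests 18-24 flip the usual direction of the addition: a scalar
	 * register has the packet pointer added to it (imm/x += pkt_ptr).
	 * That appears to be acceptable as long as the scalar was bounded
	 * (e.g. masked) beforehand and the result is still range-checked
	 * against data_end; in test23 the check leaves R5 with a proven
	 * range of 0 bytes (r=0 in the error string), so the 8-byte store
	 * is refused.
	 */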
2981 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002982 "direct packet access: test18 (imm += pkt_ptr, 1)",
2983 .insns = {
2984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2985 offsetof(struct __sk_buff, data)),
2986 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2987 offsetof(struct __sk_buff, data_end)),
2988 BPF_MOV64_IMM(BPF_REG_0, 8),
2989 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2990 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2991 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2992 BPF_MOV64_IMM(BPF_REG_0, 0),
2993 BPF_EXIT_INSN(),
2994 },
2995 .result = ACCEPT,
2996 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2997 },
2998 {
2999 "direct packet access: test19 (imm += pkt_ptr, 2)",
3000 .insns = {
3001 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3002 offsetof(struct __sk_buff, data)),
3003 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3004 offsetof(struct __sk_buff, data_end)),
3005 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3007 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3008 BPF_MOV64_IMM(BPF_REG_4, 4),
3009 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3010 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3011 BPF_MOV64_IMM(BPF_REG_0, 0),
3012 BPF_EXIT_INSN(),
3013 },
3014 .result = ACCEPT,
3015 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3016 },
3017 {
3018 "direct packet access: test20 (x += pkt_ptr, 1)",
3019 .insns = {
3020 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3021 offsetof(struct __sk_buff, data)),
3022 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3023 offsetof(struct __sk_buff, data_end)),
3024 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3025 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3026 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003027 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003028 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3029 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3030 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003032 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3033 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3034 BPF_MOV64_IMM(BPF_REG_0, 0),
3035 BPF_EXIT_INSN(),
3036 },
3037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3038 .result = ACCEPT,
3039 },
3040 {
3041 "direct packet access: test21 (x += pkt_ptr, 2)",
3042 .insns = {
3043 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3044 offsetof(struct __sk_buff, data)),
3045 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3046 offsetof(struct __sk_buff, data_end)),
3047 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3049 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3050 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3051 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3052 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003053 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003054 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3055 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003057 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3058 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3059 BPF_MOV64_IMM(BPF_REG_0, 0),
3060 BPF_EXIT_INSN(),
3061 },
3062 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3063 .result = ACCEPT,
3064 },
3065 {
3066 "direct packet access: test22 (x += pkt_ptr, 3)",
3067 .insns = {
3068 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3069 offsetof(struct __sk_buff, data)),
3070 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3071 offsetof(struct __sk_buff, data_end)),
3072 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3074 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3075 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3076 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3077 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3078 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3079 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3080 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3081 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003082 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003083 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3084 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3086 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3087 BPF_MOV64_IMM(BPF_REG_2, 1),
3088 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3089 BPF_MOV64_IMM(BPF_REG_0, 0),
3090 BPF_EXIT_INSN(),
3091 },
3092 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3093 .result = ACCEPT,
3094 },
3095 {
3096 "direct packet access: test23 (x += pkt_ptr, 4)",
3097 .insns = {
3098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3099 offsetof(struct __sk_buff, data)),
3100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3101 offsetof(struct __sk_buff, data_end)),
3102 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3103 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3104 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3105 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3106 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3107 BPF_MOV64_IMM(BPF_REG_0, 31),
3108 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3109 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3110 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3112 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3113 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3114 BPF_MOV64_IMM(BPF_REG_0, 0),
3115 BPF_EXIT_INSN(),
3116 },
3117 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3118 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003119 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003120 },
3121 {
3122 "direct packet access: test24 (x += pkt_ptr, 5)",
3123 .insns = {
3124 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3125 offsetof(struct __sk_buff, data)),
3126 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3127 offsetof(struct __sk_buff, data_end)),
3128 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3129 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3130 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3131 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3132 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3133 BPF_MOV64_IMM(BPF_REG_0, 64),
3134 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3135 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3136 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003138 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3139 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3140 BPF_MOV64_IMM(BPF_REG_0, 0),
3141 BPF_EXIT_INSN(),
3142 },
3143 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3144 .result = ACCEPT,
3145 },
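	/*
	 * "test25".."test28" check branch marking for the JLT/JLE forms of
	 * the data_end comparison: the packet range has to be recorded on
	 * the branch where pkt + 8 <= data_end actually holds.  Each "good
	 * access" variant loads from the packet only on that branch and is
	 * accepted; its "bad access" twin loads on the other branch and is
	 * expected to be rejected with "invalid access to packet".
	 */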
3146 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003147 "direct packet access: test25 (marking on <, good access)",
3148 .insns = {
3149 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3150 offsetof(struct __sk_buff, data)),
3151 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3152 offsetof(struct __sk_buff, data_end)),
3153 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3155 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3156 BPF_MOV64_IMM(BPF_REG_0, 0),
3157 BPF_EXIT_INSN(),
3158 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3159 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3160 },
3161 .result = ACCEPT,
3162 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3163 },
3164 {
3165 "direct packet access: test26 (marking on <, bad access)",
3166 .insns = {
3167 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3168 offsetof(struct __sk_buff, data)),
3169 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3170 offsetof(struct __sk_buff, data_end)),
3171 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3173 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3174 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3175 BPF_MOV64_IMM(BPF_REG_0, 0),
3176 BPF_EXIT_INSN(),
3177 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3178 },
3179 .result = REJECT,
3180 .errstr = "invalid access to packet",
3181 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3182 },
3183 {
3184 "direct packet access: test27 (marking on <=, good access)",
3185 .insns = {
3186 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3187 offsetof(struct __sk_buff, data)),
3188 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3189 offsetof(struct __sk_buff, data_end)),
3190 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3192 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3193 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3194 BPF_MOV64_IMM(BPF_REG_0, 1),
3195 BPF_EXIT_INSN(),
3196 },
3197 .result = ACCEPT,
3198 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08003199 .retval = 1,
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003200 },
3201 {
3202 "direct packet access: test28 (marking on <=, bad access)",
3203 .insns = {
3204 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3205 offsetof(struct __sk_buff, data)),
3206 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3207 offsetof(struct __sk_buff, data_end)),
3208 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3210 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3211 BPF_MOV64_IMM(BPF_REG_0, 1),
3212 BPF_EXIT_INSN(),
3213 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3214 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3215 },
3216 .result = REJECT,
3217 .errstr = "invalid access to packet",
3218 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3219 },
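	/*
	 * "helper access to packet" test1..test5 run as BPF_PROG_TYPE_XDP
	 * and hand packet pointers to map helpers (via the fixup_map1 fd).
	 * A packet pointer is only usable as a helper memory argument once
	 * it has been range-checked against data_end for at least the
	 * number of bytes the helper will touch; unchecked pointers and
	 * too-short ranges are expected to fail with "invalid access to
	 * packet".
	 */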
3220 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003221 "helper access to packet: test1, valid packet_ptr range",
3222 .insns = {
3223 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3224 offsetof(struct xdp_md, data)),
3225 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3226 offsetof(struct xdp_md, data_end)),
3227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3229 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3230 BPF_LD_MAP_FD(BPF_REG_1, 0),
3231 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3232 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003233 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3234 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003235 BPF_MOV64_IMM(BPF_REG_0, 0),
3236 BPF_EXIT_INSN(),
3237 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003238 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003239 .result_unpriv = ACCEPT,
3240 .result = ACCEPT,
3241 .prog_type = BPF_PROG_TYPE_XDP,
3242 },
3243 {
3244 "helper access to packet: test2, unchecked packet_ptr",
3245 .insns = {
3246 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3247 offsetof(struct xdp_md, data)),
3248 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003249 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3250 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003251 BPF_MOV64_IMM(BPF_REG_0, 0),
3252 BPF_EXIT_INSN(),
3253 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003254 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003255 .result = REJECT,
3256 .errstr = "invalid access to packet",
3257 .prog_type = BPF_PROG_TYPE_XDP,
3258 },
3259 {
3260 "helper access to packet: test3, variable add",
3261 .insns = {
3262 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3263 offsetof(struct xdp_md, data)),
3264 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3265 offsetof(struct xdp_md, data_end)),
3266 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3268 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3269 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3270 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3271 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3272 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3274 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3275 BPF_LD_MAP_FD(BPF_REG_1, 0),
3276 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3278 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003279 BPF_MOV64_IMM(BPF_REG_0, 0),
3280 BPF_EXIT_INSN(),
3281 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003282 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003283 .result = ACCEPT,
3284 .prog_type = BPF_PROG_TYPE_XDP,
3285 },
3286 {
3287 "helper access to packet: test4, packet_ptr with bad range",
3288 .insns = {
3289 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3290 offsetof(struct xdp_md, data)),
3291 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3292 offsetof(struct xdp_md, data_end)),
3293 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3295 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3296 BPF_MOV64_IMM(BPF_REG_0, 0),
3297 BPF_EXIT_INSN(),
3298 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003299 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3300 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003301 BPF_MOV64_IMM(BPF_REG_0, 0),
3302 BPF_EXIT_INSN(),
3303 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003304 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003305 .result = REJECT,
3306 .errstr = "invalid access to packet",
3307 .prog_type = BPF_PROG_TYPE_XDP,
3308 },
3309 {
3310 "helper access to packet: test5, packet_ptr with too short range",
3311 .insns = {
3312 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3313 offsetof(struct xdp_md, data)),
3314 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3315 offsetof(struct xdp_md, data_end)),
3316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3317 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3319 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3320 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3322 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003323 BPF_MOV64_IMM(BPF_REG_0, 0),
3324 BPF_EXIT_INSN(),
3325 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003326 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003327 .result = REJECT,
3328 .errstr = "invalid access to packet",
3329 .prog_type = BPF_PROG_TYPE_XDP,
3330 },
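	/*
	 * test6..test10 repeat the same five helper patterns for
	 * BPF_PROG_TYPE_SCHED_CLS, reading data/data_end from struct
	 * __sk_buff instead of struct xdp_md; the expected verdicts match
	 * the XDP variants above.
	 */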
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003331 {
3332 "helper access to packet: test6, cls valid packet_ptr range",
3333 .insns = {
3334 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3335 offsetof(struct __sk_buff, data)),
3336 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3337 offsetof(struct __sk_buff, data_end)),
3338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3340 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3341 BPF_LD_MAP_FD(BPF_REG_1, 0),
3342 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3343 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3345 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003346 BPF_MOV64_IMM(BPF_REG_0, 0),
3347 BPF_EXIT_INSN(),
3348 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003349 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003350 .result = ACCEPT,
3351 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3352 },
3353 {
3354 "helper access to packet: test7, cls unchecked packet_ptr",
3355 .insns = {
3356 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3357 offsetof(struct __sk_buff, data)),
3358 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3360 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003361 BPF_MOV64_IMM(BPF_REG_0, 0),
3362 BPF_EXIT_INSN(),
3363 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003364 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003365 .result = REJECT,
3366 .errstr = "invalid access to packet",
3367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3368 },
3369 {
3370 "helper access to packet: test8, cls variable add",
3371 .insns = {
3372 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3373 offsetof(struct __sk_buff, data)),
3374 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3375 offsetof(struct __sk_buff, data_end)),
3376 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3378 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3379 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3380 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3381 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3382 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3384 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3385 BPF_LD_MAP_FD(BPF_REG_1, 0),
3386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3388 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003389 BPF_MOV64_IMM(BPF_REG_0, 0),
3390 BPF_EXIT_INSN(),
3391 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003392 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003393 .result = ACCEPT,
3394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3395 },
3396 {
3397 "helper access to packet: test9, cls packet_ptr with bad range",
3398 .insns = {
3399 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3400 offsetof(struct __sk_buff, data)),
3401 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3402 offsetof(struct __sk_buff, data_end)),
3403 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3404 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3405 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3406 BPF_MOV64_IMM(BPF_REG_0, 0),
3407 BPF_EXIT_INSN(),
3408 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3410 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003411 BPF_MOV64_IMM(BPF_REG_0, 0),
3412 BPF_EXIT_INSN(),
3413 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003414 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003415 .result = REJECT,
3416 .errstr = "invalid access to packet",
3417 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3418 },
3419 {
3420 "helper access to packet: test10, cls packet_ptr with too short range",
3421 .insns = {
3422 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3423 offsetof(struct __sk_buff, data)),
3424 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3425 offsetof(struct __sk_buff, data_end)),
3426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3427 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3429 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3430 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3432 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003433 BPF_MOV64_IMM(BPF_REG_0, 0),
3434 BPF_EXIT_INSN(),
3435 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003436 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003437 .result = REJECT,
3438 .errstr = "invalid access to packet",
3439 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3440 },
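	/*
	 * test11..test21 pass packet pointers to helpers that take a memory
	 * area plus size.  bpf_skb_store_bytes()/bpf_skb_load_bytes() do not
	 * accept direct packet pointers at all ("helper access to the
	 * packet"); bpf_csum_diff() does, but only within the verified
	 * window: sizes inside the window (including the zero-length range
	 * of test19) are accepted, while sizes past the window or negative
	 * sizes are rejected.  test20 and test21 additionally show that
	 * passing pkt_end itself, or the pointer at the end of the checked
	 * window rather than its start, fails as well.
	 */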
3441 {
3442 "helper access to packet: test11, cls unsuitable helper 1",
3443 .insns = {
3444 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3445 offsetof(struct __sk_buff, data)),
3446 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3447 offsetof(struct __sk_buff, data_end)),
3448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3449 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3451 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3452 BPF_MOV64_IMM(BPF_REG_2, 0),
3453 BPF_MOV64_IMM(BPF_REG_4, 42),
3454 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3456 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003457 BPF_MOV64_IMM(BPF_REG_0, 0),
3458 BPF_EXIT_INSN(),
3459 },
3460 .result = REJECT,
3461 .errstr = "helper access to the packet",
3462 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3463 },
3464 {
3465 "helper access to packet: test12, cls unsuitable helper 2",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3474 BPF_MOV64_IMM(BPF_REG_2, 0),
3475 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003476 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3477 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003478 BPF_MOV64_IMM(BPF_REG_0, 0),
3479 BPF_EXIT_INSN(),
3480 },
3481 .result = REJECT,
3482 .errstr = "helper access to the packet",
3483 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3484 },
3485 {
3486 "helper access to packet: test13, cls helper ok",
3487 .insns = {
3488 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3489 offsetof(struct __sk_buff, data)),
3490 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3491 offsetof(struct __sk_buff, data_end)),
3492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3495 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3496 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3497 BPF_MOV64_IMM(BPF_REG_2, 4),
3498 BPF_MOV64_IMM(BPF_REG_3, 0),
3499 BPF_MOV64_IMM(BPF_REG_4, 0),
3500 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003501 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3502 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003503 BPF_MOV64_IMM(BPF_REG_0, 0),
3504 BPF_EXIT_INSN(),
3505 },
3506 .result = ACCEPT,
3507 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3508 },
3509 {
Edward Creef65b1842017-08-07 15:27:12 +01003510 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003511 .insns = {
3512 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3513 offsetof(struct __sk_buff, data)),
3514 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3515 offsetof(struct __sk_buff, data_end)),
3516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3517 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3519 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3520 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3521 BPF_MOV64_IMM(BPF_REG_2, 4),
3522 BPF_MOV64_IMM(BPF_REG_3, 0),
3523 BPF_MOV64_IMM(BPF_REG_4, 0),
3524 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3526 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003527 BPF_MOV64_IMM(BPF_REG_0, 0),
3528 BPF_EXIT_INSN(),
3529 },
Edward Creef65b1842017-08-07 15:27:12 +01003530 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3532 },
3533 {
Edward Creef65b1842017-08-07 15:27:12 +01003534 "helper access to packet: test15, cls helper fail sub",
3535 .insns = {
3536 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3537 offsetof(struct __sk_buff, data)),
3538 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3539 offsetof(struct __sk_buff, data_end)),
3540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3541 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3543 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3544 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3545 BPF_MOV64_IMM(BPF_REG_2, 4),
3546 BPF_MOV64_IMM(BPF_REG_3, 0),
3547 BPF_MOV64_IMM(BPF_REG_4, 0),
3548 BPF_MOV64_IMM(BPF_REG_5, 0),
3549 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3550 BPF_FUNC_csum_diff),
3551 BPF_MOV64_IMM(BPF_REG_0, 0),
3552 BPF_EXIT_INSN(),
3553 },
3554 .result = REJECT,
3555 .errstr = "invalid access to packet",
3556 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3557 },
3558 {
3559 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003560 .insns = {
3561 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3562 offsetof(struct __sk_buff, data)),
3563 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3564 offsetof(struct __sk_buff, data_end)),
3565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3566 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3568 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3569 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3570 BPF_MOV64_IMM(BPF_REG_2, 8),
3571 BPF_MOV64_IMM(BPF_REG_3, 0),
3572 BPF_MOV64_IMM(BPF_REG_4, 0),
3573 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003574 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3575 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003576 BPF_MOV64_IMM(BPF_REG_0, 0),
3577 BPF_EXIT_INSN(),
3578 },
3579 .result = REJECT,
3580 .errstr = "invalid access to packet",
3581 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3582 },
3583 {
Edward Creef65b1842017-08-07 15:27:12 +01003584 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003585 .insns = {
3586 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3587 offsetof(struct __sk_buff, data)),
3588 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3589 offsetof(struct __sk_buff, data_end)),
3590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3591 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3593 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3594 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3595 BPF_MOV64_IMM(BPF_REG_2, -9),
3596 BPF_MOV64_IMM(BPF_REG_3, 0),
3597 BPF_MOV64_IMM(BPF_REG_4, 0),
3598 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3600 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003601 BPF_MOV64_IMM(BPF_REG_0, 0),
3602 BPF_EXIT_INSN(),
3603 },
3604 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003605 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3607 },
3608 {
Edward Creef65b1842017-08-07 15:27:12 +01003609 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003610 .insns = {
3611 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3612 offsetof(struct __sk_buff, data)),
3613 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3614 offsetof(struct __sk_buff, data_end)),
3615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3616 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3618 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3620 BPF_MOV64_IMM(BPF_REG_2, ~0),
3621 BPF_MOV64_IMM(BPF_REG_3, 0),
3622 BPF_MOV64_IMM(BPF_REG_4, 0),
3623 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003624 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3625 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003626 BPF_MOV64_IMM(BPF_REG_0, 0),
3627 BPF_EXIT_INSN(),
3628 },
3629 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003630 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003631 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3632 },
3633 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08003634 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003635 .insns = {
3636 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3637 offsetof(struct __sk_buff, data)),
3638 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3639 offsetof(struct __sk_buff, data_end)),
3640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3643 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3645 BPF_MOV64_IMM(BPF_REG_2, 0),
3646 BPF_MOV64_IMM(BPF_REG_3, 0),
3647 BPF_MOV64_IMM(BPF_REG_4, 0),
3648 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3650 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003651 BPF_MOV64_IMM(BPF_REG_0, 0),
3652 BPF_EXIT_INSN(),
3653 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08003654 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003655 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3656 },
3657 {
Edward Creef65b1842017-08-07 15:27:12 +01003658 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003659 .insns = {
3660 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3661 offsetof(struct __sk_buff, data)),
3662 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3663 offsetof(struct __sk_buff, data_end)),
3664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3665 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3666 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3667 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3668 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3669 BPF_MOV64_IMM(BPF_REG_2, 4),
3670 BPF_MOV64_IMM(BPF_REG_3, 0),
3671 BPF_MOV64_IMM(BPF_REG_4, 0),
3672 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003673 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3674 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003675 BPF_MOV64_IMM(BPF_REG_0, 0),
3676 BPF_EXIT_INSN(),
3677 },
3678 .result = REJECT,
3679 .errstr = "R1 type=pkt_end expected=fp",
3680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3681 },
3682 {
Edward Creef65b1842017-08-07 15:27:12 +01003683 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003684 .insns = {
3685 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3686 offsetof(struct __sk_buff, data)),
3687 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3688 offsetof(struct __sk_buff, data_end)),
3689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3691 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3692 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3693 BPF_MOV64_IMM(BPF_REG_2, 4),
3694 BPF_MOV64_IMM(BPF_REG_3, 0),
3695 BPF_MOV64_IMM(BPF_REG_4, 0),
3696 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003697 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3698 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003699 BPF_MOV64_IMM(BPF_REG_0, 0),
3700 BPF_EXIT_INSN(),
3701 },
3702 .result = REJECT,
3703 .errstr = "invalid access to packet",
3704 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3705 },
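	/*
	 * The array-map tests below use fixup_map2 (an array map whose
	 * value is the 48-byte struct test_val) and exercise bounded index
	 * arithmetic on the looked-up value pointer: NULL check, clamp the
	 * index, shift it into a byte offset, add it to R0, then store.
	 * Roughly (illustrative only):
	 *
	 *	char *p = bpf_map_lookup_elem(&map, &key);
	 *	if (!p)
	 *		return 0;
	 *	if (idx < MAX_ENTRIES)
	 *		*(u64 *)(p + idx * 4 + offsetof(struct test_val, foo)) = 0;
	 *
	 * Privileged loads are accepted; unprivileged ones are expected to
	 * be rejected with "R0 leaks addr".
	 */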
Josef Bacik48461132016-09-28 10:54:32 -04003706 {
3707 "valid map access into an array with a constant",
3708 .insns = {
3709 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3712 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3714 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003716 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3717 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003718 BPF_EXIT_INSN(),
3719 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003720 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003721 .errstr_unpriv = "R0 leaks addr",
3722 .result_unpriv = REJECT,
3723 .result = ACCEPT,
3724 },
3725 {
3726 "valid map access into an array with a register",
3727 .insns = {
3728 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3731 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003732 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3733 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003734 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3735 BPF_MOV64_IMM(BPF_REG_1, 4),
3736 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3737 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003738 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3739 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003740 BPF_EXIT_INSN(),
3741 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003742 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003743 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003744 .result_unpriv = REJECT,
3745 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003746 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003747 },
3748 {
3749 "valid map access into an array with a variable",
3750 .insns = {
3751 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3752 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3754 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3756 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3758 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3759 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3760 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3761 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003762 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3763 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003764 BPF_EXIT_INSN(),
3765 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003766 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003767 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003768 .result_unpriv = REJECT,
3769 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003770 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003771 },
3772 {
3773 "valid map access into an array with a signed variable",
3774 .insns = {
3775 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3776 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3778 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003779 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3780 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3782 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3783 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3784 BPF_MOV32_IMM(BPF_REG_1, 0),
3785 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3786 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3787 BPF_MOV32_IMM(BPF_REG_1, 0),
3788 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3789 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003790 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3791 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003792 BPF_EXIT_INSN(),
3793 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003794 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003795 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003796 .result_unpriv = REJECT,
3797 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003798 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003799 },
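	/*
	 * The "invalid map access" variants that follow omit or botch the
	 * bounds check: a constant offset one past the value, an index that
	 * was never clamped, a variable with no bound at all, a signed
	 * check with no floor, an off-by-one upper bound, and finally two
	 * pointers added together; each is expected to be rejected with the
	 * errstr quoted in the test.
	 */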
3800 {
3801 "invalid map access into an array with a constant",
3802 .insns = {
3803 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3806 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3808 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3810 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3811 offsetof(struct test_val, foo)),
3812 BPF_EXIT_INSN(),
3813 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003814 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003815 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3816 .result = REJECT,
3817 },
3818 {
3819 "invalid map access into an array with a register",
3820 .insns = {
3821 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3823 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3824 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003825 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3826 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3828 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3829 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3830 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003831 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3832 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003833 BPF_EXIT_INSN(),
3834 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003835 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003836 .errstr = "R0 min value is outside of the array range",
3837 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003838 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003839 },
3840 {
3841 "invalid map access into an array with a variable",
3842 .insns = {
3843 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3846 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003847 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3848 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3850 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3851 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3852 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003853 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3854 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003855 BPF_EXIT_INSN(),
3856 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003857 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003858 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04003859 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003860 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003861 },
3862 {
3863 "invalid map access into an array with no floor check",
3864 .insns = {
3865 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3868 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3870 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003871 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01003872 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04003873 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3874 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3875 BPF_MOV32_IMM(BPF_REG_1, 0),
3876 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3877 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003878 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3879 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003880 BPF_EXIT_INSN(),
3881 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003882 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003883 .errstr_unpriv = "R0 leaks addr",
3884 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003885 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003886 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003887 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003888 },
3889 {
3890 		"invalid map access into an array with an invalid max check",
3891 .insns = {
3892 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3893 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3894 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3895 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3897 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003898 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3899 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3900 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3901 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3902 BPF_MOV32_IMM(BPF_REG_1, 0),
3903 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3904 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003905 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3906 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003907 BPF_EXIT_INSN(),
3908 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003909 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003910 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003911 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003912 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003913 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003914 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003915 },
3916 {
3917 		"invalid map access into an array with an invalid max check",
3918 .insns = {
3919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3922 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003923 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3924 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3926 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3927 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3928 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3930 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3932 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003933 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3936 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003937 BPF_EXIT_INSN(),
3938 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003939 .fixup_map2 = { 3, 11 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003940 .errstr = "R0 pointer += pointer",
Josef Bacik48461132016-09-28 10:54:32 -04003941 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003942 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003943 },
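	/*
	 * The next group covers PTR_TO_MAP_VALUE_OR_NULL: a NULL check on
	 * one register is expected to validate every copy taken from the
	 * same lookup, while any ALU op performed on the maybe-NULL pointer
	 * before that check ("alu ops on ptr_to_map_value_or_null") is
	 * rejected, since it would defeat the NULL tracking.
	 */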
Thomas Graf57a09bf2016-10-18 19:51:19 +02003944 {
3945 "multiple registers share map_lookup_elem result",
3946 .insns = {
3947 BPF_MOV64_IMM(BPF_REG_1, 10),
3948 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3951 BPF_LD_MAP_FD(BPF_REG_1, 0),
3952 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3953 BPF_FUNC_map_lookup_elem),
3954 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3955 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3956 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3957 BPF_EXIT_INSN(),
3958 },
3959 .fixup_map1 = { 4 },
3960 .result = ACCEPT,
3961 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3962 },
3963 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003964 "alu ops on ptr_to_map_value_or_null, 1",
3965 .insns = {
3966 BPF_MOV64_IMM(BPF_REG_1, 10),
3967 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3970 BPF_LD_MAP_FD(BPF_REG_1, 0),
3971 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3972 BPF_FUNC_map_lookup_elem),
3973 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3976 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3977 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3978 BPF_EXIT_INSN(),
3979 },
3980 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08003981 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003982 .result = REJECT,
3983 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3984 },
3985 {
3986 "alu ops on ptr_to_map_value_or_null, 2",
3987 .insns = {
3988 BPF_MOV64_IMM(BPF_REG_1, 10),
3989 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3990 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3992 BPF_LD_MAP_FD(BPF_REG_1, 0),
3993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3994 BPF_FUNC_map_lookup_elem),
3995 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3996 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3997 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3998 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3999 BPF_EXIT_INSN(),
4000 },
4001 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004002 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004003 .result = REJECT,
4004 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4005 },
4006 {
4007 "alu ops on ptr_to_map_value_or_null, 3",
4008 .insns = {
4009 BPF_MOV64_IMM(BPF_REG_1, 10),
4010 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4011 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4012 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4013 BPF_LD_MAP_FD(BPF_REG_1, 0),
4014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4015 BPF_FUNC_map_lookup_elem),
4016 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4017 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
4018 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4019 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4020 BPF_EXIT_INSN(),
4021 },
4022 .fixup_map1 = { 4 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08004023 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02004024 .result = REJECT,
4025 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4026 },
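	/*
	 * The two tests below call bpf_map_lookup_elem() twice.  R1-R5 are
	 * clobbered by a helper call, so a result parked in R4 before the
	 * second call is unreadable afterwards ("R4 !read_ok"); the "valid
	 * indirect" variant copies R0 only after the final lookup and
	 * NULL-checks it before the store, which is accepted.
	 */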
4027 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02004028 "invalid memory access with multiple map_lookup_elem calls",
4029 .insns = {
4030 BPF_MOV64_IMM(BPF_REG_1, 10),
4031 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4034 BPF_LD_MAP_FD(BPF_REG_1, 0),
4035 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4036 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4038 BPF_FUNC_map_lookup_elem),
4039 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4043 BPF_FUNC_map_lookup_elem),
4044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4045 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4046 BPF_EXIT_INSN(),
4047 },
4048 .fixup_map1 = { 4 },
4049 .result = REJECT,
4050 .errstr = "R4 !read_ok",
4051 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4052 },
4053 {
4054 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4055 .insns = {
4056 BPF_MOV64_IMM(BPF_REG_1, 10),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4060 BPF_LD_MAP_FD(BPF_REG_1, 0),
4061 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4062 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4063 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4064 BPF_FUNC_map_lookup_elem),
4065 BPF_MOV64_IMM(BPF_REG_2, 10),
4066 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4067 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4068 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4069 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4070 BPF_FUNC_map_lookup_elem),
4071 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4072 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4073 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4074 BPF_EXIT_INSN(),
4075 },
4076 .fixup_map1 = { 4 },
4077 .result = ACCEPT,
4078 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4079 },
Josef Bacike9548902016-11-29 12:35:19 -05004080 {
4081 "invalid map access from else condition",
4082 .insns = {
4083 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4084 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4086 BPF_LD_MAP_FD(BPF_REG_1, 0),
4087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4089 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4090 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4092 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4093 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4094 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4095 BPF_EXIT_INSN(),
4096 },
4097 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004098 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004099 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004100 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004101 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004102 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004103 },
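	/*
	 * The "constant register |= constant" tests check that BPF_OR with
	 * an immediate or with another known constant keeps R2 a known
	 * scalar, so it can serve as the size argument of bpf_probe_read()
	 * into a 48-byte stack buffer: 34 | 13 == 47 fits and is accepted,
	 * 34 | 24 == 58 overruns the buffer and is rejected ("invalid stack
	 * type R1 off=-48 access_size=58").
	 */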
Gianluca Borello3c8397442016-12-03 12:31:33 -08004104 {
4105 "constant register |= constant should keep constant type",
4106 .insns = {
4107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4109 BPF_MOV64_IMM(BPF_REG_2, 34),
4110 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4111 BPF_MOV64_IMM(BPF_REG_3, 0),
4112 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4113 BPF_EXIT_INSN(),
4114 },
4115 .result = ACCEPT,
4116 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4117 },
4118 {
4119 "constant register |= constant should not bypass stack boundary checks",
4120 .insns = {
4121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4123 BPF_MOV64_IMM(BPF_REG_2, 34),
4124 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4125 BPF_MOV64_IMM(BPF_REG_3, 0),
4126 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4127 BPF_EXIT_INSN(),
4128 },
4129 .errstr = "invalid stack type R1 off=-48 access_size=58",
4130 .result = REJECT,
4131 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4132 },
4133 {
4134 "constant register |= constant register should keep constant type",
4135 .insns = {
4136 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4138 BPF_MOV64_IMM(BPF_REG_2, 34),
4139 BPF_MOV64_IMM(BPF_REG_4, 13),
4140 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4141 BPF_MOV64_IMM(BPF_REG_3, 0),
4142 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4143 BPF_EXIT_INSN(),
4144 },
4145 .result = ACCEPT,
4146 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4147 },
4148 {
4149 "constant register |= constant register should not bypass stack boundary checks",
4150 .insns = {
4151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4153 BPF_MOV64_IMM(BPF_REG_2, 34),
4154 BPF_MOV64_IMM(BPF_REG_4, 24),
4155 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4156 BPF_MOV64_IMM(BPF_REG_3, 0),
4157 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4158 BPF_EXIT_INSN(),
4159 },
4160 .errstr = "invalid stack type R1 off=-48 access_size=58",
4161 .result = REJECT,
4162 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4163 },
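	/*
	 * The LWT tests below probe per-program-type packet permissions
	 * with one shared load/store pattern: LWT_IN and LWT_OUT may read
	 * the packet but any direct write is rejected ("cannot write into
	 * packet"), while LWT_XMIT may both read and write.  The
	 * "overlapping checks" and tc_classid tests at the end of the group
	 * cover a second, smaller data_end check nested inside the first
	 * and the per-program-type __sk_buff field restrictions.
	 */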
Thomas Graf3f731d82016-12-05 10:30:52 +01004164 {
4165 "invalid direct packet write for LWT_IN",
4166 .insns = {
4167 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4168 offsetof(struct __sk_buff, data)),
4169 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4170 offsetof(struct __sk_buff, data_end)),
4171 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4173 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4174 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4175 BPF_MOV64_IMM(BPF_REG_0, 0),
4176 BPF_EXIT_INSN(),
4177 },
4178 .errstr = "cannot write into packet",
4179 .result = REJECT,
4180 .prog_type = BPF_PROG_TYPE_LWT_IN,
4181 },
4182 {
4183 "invalid direct packet write for LWT_OUT",
4184 .insns = {
4185 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4186 offsetof(struct __sk_buff, data)),
4187 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4188 offsetof(struct __sk_buff, data_end)),
4189 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4191 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4192 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4193 BPF_MOV64_IMM(BPF_REG_0, 0),
4194 BPF_EXIT_INSN(),
4195 },
4196 .errstr = "cannot write into packet",
4197 .result = REJECT,
4198 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4199 },
4200 {
4201 "direct packet write for LWT_XMIT",
4202 .insns = {
4203 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4204 offsetof(struct __sk_buff, data)),
4205 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4206 offsetof(struct __sk_buff, data_end)),
4207 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4209 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4210 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4211 BPF_MOV64_IMM(BPF_REG_0, 0),
4212 BPF_EXIT_INSN(),
4213 },
4214 .result = ACCEPT,
4215 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4216 },
4217 {
4218 "direct packet read for LWT_IN",
4219 .insns = {
4220 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4221 offsetof(struct __sk_buff, data)),
4222 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4223 offsetof(struct __sk_buff, data_end)),
4224 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4226 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4227 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4228 BPF_MOV64_IMM(BPF_REG_0, 0),
4229 BPF_EXIT_INSN(),
4230 },
4231 .result = ACCEPT,
4232 .prog_type = BPF_PROG_TYPE_LWT_IN,
4233 },
4234 {
4235 "direct packet read for LWT_OUT",
4236 .insns = {
4237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4238 offsetof(struct __sk_buff, data)),
4239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4240 offsetof(struct __sk_buff, data_end)),
4241 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4243 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4244 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4245 BPF_MOV64_IMM(BPF_REG_0, 0),
4246 BPF_EXIT_INSN(),
4247 },
4248 .result = ACCEPT,
4249 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4250 },
4251 {
4252 "direct packet read for LWT_XMIT",
4253 .insns = {
4254 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4255 offsetof(struct __sk_buff, data)),
4256 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4257 offsetof(struct __sk_buff, data_end)),
4258 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4260 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4261 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4262 BPF_MOV64_IMM(BPF_REG_0, 0),
4263 BPF_EXIT_INSN(),
4264 },
4265 .result = ACCEPT,
4266 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4267 },
4268 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004269 "overlapping checks for direct packet access",
4270 .insns = {
4271 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4272 offsetof(struct __sk_buff, data)),
4273 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4274 offsetof(struct __sk_buff, data_end)),
4275 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4277 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4278 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4279 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4280 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4281 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4282 BPF_MOV64_IMM(BPF_REG_0, 0),
4283 BPF_EXIT_INSN(),
4284 },
4285 .result = ACCEPT,
4286 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4287 },
4288 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004289 "invalid access of tc_classid for LWT_IN",
4290 .insns = {
4291 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4292 offsetof(struct __sk_buff, tc_classid)),
4293 BPF_EXIT_INSN(),
4294 },
4295 .result = REJECT,
4296 .errstr = "invalid bpf_context access",
4297 },
4298 {
4299 "invalid access of tc_classid for LWT_OUT",
4300 .insns = {
4301 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4302 offsetof(struct __sk_buff, tc_classid)),
4303 BPF_EXIT_INSN(),
4304 },
4305 .result = REJECT,
4306 .errstr = "invalid bpf_context access",
4307 },
4308 {
4309 "invalid access of tc_classid for LWT_XMIT",
4310 .insns = {
4311 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4312 offsetof(struct __sk_buff, tc_classid)),
4313 BPF_EXIT_INSN(),
4314 },
4315 .result = REJECT,
4316 .errstr = "invalid bpf_context access",
4317 },
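	/*
	 * The "leak pointer" tests store kernel pointers (a map address,
	 * the stack pointer in R10, the context pointer saved in R6) into
	 * locations an unprivileged observer could later read out:
	 * skb->cb[] via plain stores or BPF_STX_XADD, and a map value.
	 * All four are accepted when privileged and expected to be rejected
	 * unprivileged with a "leaks addr" message naming the offending
	 * register.
	 */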
Gianluca Borello57225692017-01-09 10:19:47 -08004318 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004319 "leak pointer into ctx 1",
4320 .insns = {
4321 BPF_MOV64_IMM(BPF_REG_0, 0),
4322 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4323 offsetof(struct __sk_buff, cb[0])),
4324 BPF_LD_MAP_FD(BPF_REG_2, 0),
4325 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4326 offsetof(struct __sk_buff, cb[0])),
4327 BPF_EXIT_INSN(),
4328 },
4329 .fixup_map1 = { 2 },
4330 .errstr_unpriv = "R2 leaks addr into mem",
4331 .result_unpriv = REJECT,
4332 .result = ACCEPT,
4333 },
4334 {
4335 "leak pointer into ctx 2",
4336 .insns = {
4337 BPF_MOV64_IMM(BPF_REG_0, 0),
4338 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4339 offsetof(struct __sk_buff, cb[0])),
4340 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4341 offsetof(struct __sk_buff, cb[0])),
4342 BPF_EXIT_INSN(),
4343 },
4344 .errstr_unpriv = "R10 leaks addr into mem",
4345 .result_unpriv = REJECT,
4346 .result = ACCEPT,
4347 },
4348 {
4349 "leak pointer into ctx 3",
4350 .insns = {
4351 BPF_MOV64_IMM(BPF_REG_0, 0),
4352 BPF_LD_MAP_FD(BPF_REG_2, 0),
4353 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4354 offsetof(struct __sk_buff, cb[0])),
4355 BPF_EXIT_INSN(),
4356 },
4357 .fixup_map1 = { 1 },
4358 .errstr_unpriv = "R2 leaks addr into ctx",
4359 .result_unpriv = REJECT,
4360 .result = ACCEPT,
4361 },
4362 {
4363 "leak pointer into map val",
4364 .insns = {
4365 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4366 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4367 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4369 BPF_LD_MAP_FD(BPF_REG_1, 0),
4370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4371 BPF_FUNC_map_lookup_elem),
4372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4373 BPF_MOV64_IMM(BPF_REG_3, 0),
4374 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4375 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4376 BPF_MOV64_IMM(BPF_REG_0, 0),
4377 BPF_EXIT_INSN(),
4378 },
4379 .fixup_map1 = { 4 },
4380 .errstr_unpriv = "R6 leaks addr into mem",
4381 .result_unpriv = REJECT,
4382 .result = ACCEPT,
4383 },
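	/*
	 * "helper access to map" passes a map value pointer plus a length
	 * to bpf_probe_read() (or bpf_trace_printk() for the empty range):
	 * the full 48-byte value and partial lengths are accepted, while a
	 * zero, out-of-bounds, or negative length is rejected with the
	 * quoted errstr.
	 */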
4384 {
Gianluca Borello57225692017-01-09 10:19:47 -08004385 "helper access to map: full range",
4386 .insns = {
4387 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4389 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4390 BPF_LD_MAP_FD(BPF_REG_1, 0),
4391 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4392 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4393 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4394 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4395 BPF_MOV64_IMM(BPF_REG_3, 0),
4396 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4397 BPF_EXIT_INSN(),
4398 },
4399 .fixup_map2 = { 3 },
4400 .result = ACCEPT,
4401 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4402 },
4403 {
4404 "helper access to map: partial range",
4405 .insns = {
4406 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4407 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4408 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4409 BPF_LD_MAP_FD(BPF_REG_1, 0),
4410 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4411 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4412 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4413 BPF_MOV64_IMM(BPF_REG_2, 8),
4414 BPF_MOV64_IMM(BPF_REG_3, 0),
4415 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4416 BPF_EXIT_INSN(),
4417 },
4418 .fixup_map2 = { 3 },
4419 .result = ACCEPT,
4420 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4421 },
4422 {
4423 "helper access to map: empty range",
4424 .insns = {
4425 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4427 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4428 BPF_LD_MAP_FD(BPF_REG_1, 0),
4429 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004430 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4431 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4432 BPF_MOV64_IMM(BPF_REG_2, 0),
4433 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004434 BPF_EXIT_INSN(),
4435 },
4436 .fixup_map2 = { 3 },
4437 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4438 .result = REJECT,
4439 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4440 },
4441 {
4442 "helper access to map: out-of-bound range",
4443 .insns = {
4444 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4446 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4447 BPF_LD_MAP_FD(BPF_REG_1, 0),
4448 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4449 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4450 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4451 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4452 BPF_MOV64_IMM(BPF_REG_3, 0),
4453 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4454 BPF_EXIT_INSN(),
4455 },
4456 .fixup_map2 = { 3 },
4457 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4458 .result = REJECT,
4459 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4460 },
4461 {
4462 "helper access to map: negative range",
4463 .insns = {
4464 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4466 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4467 BPF_LD_MAP_FD(BPF_REG_1, 0),
4468 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4469 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4470 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4471 BPF_MOV64_IMM(BPF_REG_2, -8),
4472 BPF_MOV64_IMM(BPF_REG_3, 0),
4473 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4474 BPF_EXIT_INSN(),
4475 },
4476 .fixup_map2 = { 3 },
4477	.errstr = "R2 min value is negative",
4478	.result = REJECT,
4479 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4480 },
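	/* "adjusted map (via const imm)" tests: the map value pointer is
	 * first advanced by a constant immediate (offsetof(struct test_val,
	 * foo)), so valid helper sizes shrink accordingly and the same
	 * empty/negative/out-of-bound cases must still be rejected.
	 */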
4481 {
4482 "helper access to adjusted map (via const imm): full range",
4483 .insns = {
4484 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4486 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4487 BPF_LD_MAP_FD(BPF_REG_1, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4492 offsetof(struct test_val, foo)),
4493 BPF_MOV64_IMM(BPF_REG_2,
4494 sizeof(struct test_val) -
4495 offsetof(struct test_val, foo)),
4496 BPF_MOV64_IMM(BPF_REG_3, 0),
4497 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4498 BPF_EXIT_INSN(),
4499 },
4500 .fixup_map2 = { 3 },
4501 .result = ACCEPT,
4502 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4503 },
4504 {
4505 "helper access to adjusted map (via const imm): partial range",
4506 .insns = {
4507 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4509 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4510 BPF_LD_MAP_FD(BPF_REG_1, 0),
4511 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4512 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4513 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4515 offsetof(struct test_val, foo)),
4516 BPF_MOV64_IMM(BPF_REG_2, 8),
4517 BPF_MOV64_IMM(BPF_REG_3, 0),
4518 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4519 BPF_EXIT_INSN(),
4520 },
4521 .fixup_map2 = { 3 },
4522 .result = ACCEPT,
4523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4524 },
4525 {
4526 "helper access to adjusted map (via const imm): empty range",
4527 .insns = {
4528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4530 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4531 BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4533	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4534	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4536 offsetof(struct test_val, foo)),
4537	BPF_MOV64_IMM(BPF_REG_2, 0),
4538 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4539	BPF_EXIT_INSN(),
4540 },
4541 .fixup_map2 = { 3 },
4542	.errstr = "invalid access to map value, value_size=48 off=4 size=0",
4543	.result = REJECT,
4544 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4545 },
4546 {
4547 "helper access to adjusted map (via const imm): out-of-bound range",
4548 .insns = {
4549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4551 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4552 BPF_LD_MAP_FD(BPF_REG_1, 0),
4553 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4554 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4555 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4557 offsetof(struct test_val, foo)),
4558 BPF_MOV64_IMM(BPF_REG_2,
4559 sizeof(struct test_val) -
4560 offsetof(struct test_val, foo) + 8),
4561 BPF_MOV64_IMM(BPF_REG_3, 0),
4562 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4563 BPF_EXIT_INSN(),
4564 },
4565 .fixup_map2 = { 3 },
4566 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4567 .result = REJECT,
4568 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4569 },
4570 {
4571 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4572 .insns = {
4573 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4575 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4576 BPF_LD_MAP_FD(BPF_REG_1, 0),
4577 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4578 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4579 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4581 offsetof(struct test_val, foo)),
4582 BPF_MOV64_IMM(BPF_REG_2, -8),
4583 BPF_MOV64_IMM(BPF_REG_3, 0),
4584 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4585 BPF_EXIT_INSN(),
4586 },
4587 .fixup_map2 = { 3 },
4588	.errstr = "R2 min value is negative",
4589	.result = REJECT,
4590 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4591 },
4592 {
4593 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4594 .insns = {
4595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4596 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4597 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4598 BPF_LD_MAP_FD(BPF_REG_1, 0),
4599 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4600 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4602 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4603 offsetof(struct test_val, foo)),
4604 BPF_MOV64_IMM(BPF_REG_2, -1),
4605 BPF_MOV64_IMM(BPF_REG_3, 0),
4606 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4607 BPF_EXIT_INSN(),
4608 },
4609 .fixup_map2 = { 3 },
4610	.errstr = "R2 min value is negative",
4611	.result = REJECT,
4612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4613 },
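	/* "adjusted map (via const reg)" tests: same checks as above, except
	 * the constant offset is moved into a register and added with
	 * BPF_ALU64_REG, exercising tracking of known-constant registers.
	 */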
4614 {
4615 "helper access to adjusted map (via const reg): full range",
4616 .insns = {
4617 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4618 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4619 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4620 BPF_LD_MAP_FD(BPF_REG_1, 0),
4621 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4622 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4624 BPF_MOV64_IMM(BPF_REG_3,
4625 offsetof(struct test_val, foo)),
4626 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4627 BPF_MOV64_IMM(BPF_REG_2,
4628 sizeof(struct test_val) -
4629 offsetof(struct test_val, foo)),
4630 BPF_MOV64_IMM(BPF_REG_3, 0),
4631 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4632 BPF_EXIT_INSN(),
4633 },
4634 .fixup_map2 = { 3 },
4635 .result = ACCEPT,
4636 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4637 },
4638 {
4639 "helper access to adjusted map (via const reg): partial range",
4640 .insns = {
4641 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4643 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4644 BPF_LD_MAP_FD(BPF_REG_1, 0),
4645 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4646 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4647 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4648 BPF_MOV64_IMM(BPF_REG_3,
4649 offsetof(struct test_val, foo)),
4650 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4651 BPF_MOV64_IMM(BPF_REG_2, 8),
4652 BPF_MOV64_IMM(BPF_REG_3, 0),
4653 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4654 BPF_EXIT_INSN(),
4655 },
4656 .fixup_map2 = { 3 },
4657 .result = ACCEPT,
4658 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4659 },
4660 {
4661 "helper access to adjusted map (via const reg): empty range",
4662 .insns = {
4663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4665 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4666 BPF_LD_MAP_FD(BPF_REG_1, 0),
4667 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4668	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4669	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4670 BPF_MOV64_IMM(BPF_REG_3, 0),
4671 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4672	BPF_MOV64_IMM(BPF_REG_2, 0),
4673 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4674	BPF_EXIT_INSN(),
4675 },
4676 .fixup_map2 = { 3 },
4677	.errstr = "R1 min value is outside of the array range",
4678	.result = REJECT,
4679 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4680 },
4681 {
4682 "helper access to adjusted map (via const reg): out-of-bound range",
4683 .insns = {
4684 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4685 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4686 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4687 BPF_LD_MAP_FD(BPF_REG_1, 0),
4688 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4690 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4691 BPF_MOV64_IMM(BPF_REG_3,
4692 offsetof(struct test_val, foo)),
4693 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4694 BPF_MOV64_IMM(BPF_REG_2,
4695 sizeof(struct test_val) -
4696 offsetof(struct test_val, foo) + 8),
4697 BPF_MOV64_IMM(BPF_REG_3, 0),
4698 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4699 BPF_EXIT_INSN(),
4700 },
4701 .fixup_map2 = { 3 },
4702 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4703 .result = REJECT,
4704 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4705 },
4706 {
4707 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4708 .insns = {
4709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4711 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4712 BPF_LD_MAP_FD(BPF_REG_1, 0),
4713 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4716 BPF_MOV64_IMM(BPF_REG_3,
4717 offsetof(struct test_val, foo)),
4718 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4719 BPF_MOV64_IMM(BPF_REG_2, -8),
4720 BPF_MOV64_IMM(BPF_REG_3, 0),
4721 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4722 BPF_EXIT_INSN(),
4723 },
4724 .fixup_map2 = { 3 },
4725	.errstr = "R2 min value is negative",
4726	.result = REJECT,
4727 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4728 },
4729 {
4730 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4731 .insns = {
4732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4734 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4735 BPF_LD_MAP_FD(BPF_REG_1, 0),
4736 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4739 BPF_MOV64_IMM(BPF_REG_3,
4740 offsetof(struct test_val, foo)),
4741 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4742 BPF_MOV64_IMM(BPF_REG_2, -1),
4743 BPF_MOV64_IMM(BPF_REG_3, 0),
4744 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4745 BPF_EXIT_INSN(),
4746 },
4747 .fixup_map2 = { 3 },
4748	.errstr = "R2 min value is negative",
4749	.result = REJECT,
4750 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4751 },
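	/* "adjusted map (via variable)" tests: the offset is loaded from the
	 * map value itself, so it is only usable after an upper-bound check
	 * (JGT against offsetof(struct test_val, foo)); a missing or
	 * too-large bound must be rejected.
	 */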
4752 {
4753 "helper access to adjusted map (via variable): full range",
4754 .insns = {
4755 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4757 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4758 BPF_LD_MAP_FD(BPF_REG_1, 0),
4759 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4760 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4762 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4763 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4764 offsetof(struct test_val, foo), 4),
4765 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4766 BPF_MOV64_IMM(BPF_REG_2,
4767 sizeof(struct test_val) -
4768 offsetof(struct test_val, foo)),
4769 BPF_MOV64_IMM(BPF_REG_3, 0),
4770 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4771 BPF_EXIT_INSN(),
4772 },
4773 .fixup_map2 = { 3 },
4774 .result = ACCEPT,
4775 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4776 },
4777 {
4778 "helper access to adjusted map (via variable): partial range",
4779 .insns = {
4780 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4782 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4783 BPF_LD_MAP_FD(BPF_REG_1, 0),
4784 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4786 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4787 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4788 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4789 offsetof(struct test_val, foo), 4),
4790 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4791 BPF_MOV64_IMM(BPF_REG_2, 8),
4792 BPF_MOV64_IMM(BPF_REG_3, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4794 BPF_EXIT_INSN(),
4795 },
4796 .fixup_map2 = { 3 },
4797 .result = ACCEPT,
4798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4799 },
4800 {
4801 "helper access to adjusted map (via variable): empty range",
4802 .insns = {
4803 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4805 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4806 BPF_LD_MAP_FD(BPF_REG_1, 0),
4807 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4808	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4809	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4810 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4811 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4812	offsetof(struct test_val, foo), 3),
4813	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4814	BPF_MOV64_IMM(BPF_REG_2, 0),
4815 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4816	BPF_EXIT_INSN(),
4817 },
4818 .fixup_map2 = { 3 },
4819	.errstr = "R1 min value is outside of the array range",
4820	.result = REJECT,
4821 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4822 },
4823 {
4824 "helper access to adjusted map (via variable): no max check",
4825 .insns = {
4826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4828 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4829 BPF_LD_MAP_FD(BPF_REG_1, 0),
4830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4833 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4834 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4835	BPF_MOV64_IMM(BPF_REG_2, 1),
4836	BPF_MOV64_IMM(BPF_REG_3, 0),
4837 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4838 BPF_EXIT_INSN(),
4839 },
4840 .fixup_map2 = { 3 },
4841	.errstr = "R1 unbounded memory access",
4842	.result = REJECT,
4843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4844 },
4845 {
4846 "helper access to adjusted map (via variable): wrong max check",
4847 .insns = {
4848 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4850 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4851 BPF_LD_MAP_FD(BPF_REG_1, 0),
4852 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4853 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4854 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4856 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4857 offsetof(struct test_val, foo), 4),
4858 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4859 BPF_MOV64_IMM(BPF_REG_2,
4860 sizeof(struct test_val) -
4861 offsetof(struct test_val, foo) + 1),
4862 BPF_MOV64_IMM(BPF_REG_3, 0),
4863 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4864 BPF_EXIT_INSN(),
4865 },
4866 .fixup_map2 = { 3 },
4867 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4868 .result = REJECT,
4869 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4870 },
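	/* "bounds check using <, <=, s<, s<=" tests: the bound on the loaded
	 * offset is established through conditional jumps; the "good access"
	 * variants only dereference on the in-bounds path, while the "bad
	 * access" variants dereference where the bound does not hold and
	 * must be rejected.
	 */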
4871	{
4872	"helper access to map: bounds check using <, good access",
4873 .insns = {
4874 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4876 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4877 BPF_LD_MAP_FD(BPF_REG_1, 0),
4878 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4879 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4880 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4881 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4882 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4883 BPF_MOV64_IMM(BPF_REG_0, 0),
4884 BPF_EXIT_INSN(),
4885 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4886 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4887 BPF_MOV64_IMM(BPF_REG_0, 0),
4888 BPF_EXIT_INSN(),
4889 },
4890 .fixup_map2 = { 3 },
4891 .result = ACCEPT,
4892 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4893 },
4894 {
4895 "helper access to map: bounds check using <, bad access",
4896 .insns = {
4897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4899 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4900 BPF_LD_MAP_FD(BPF_REG_1, 0),
4901 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4902 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4904 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4905 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4906 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4907 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4908 BPF_MOV64_IMM(BPF_REG_0, 0),
4909 BPF_EXIT_INSN(),
4910 BPF_MOV64_IMM(BPF_REG_0, 0),
4911 BPF_EXIT_INSN(),
4912 },
4913 .fixup_map2 = { 3 },
4914 .result = REJECT,
4915 .errstr = "R1 unbounded memory access",
4916 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4917 },
4918 {
4919 "helper access to map: bounds check using <=, good access",
4920 .insns = {
4921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4923 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4924 BPF_LD_MAP_FD(BPF_REG_1, 0),
4925 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4927 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4929 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4930 BPF_MOV64_IMM(BPF_REG_0, 0),
4931 BPF_EXIT_INSN(),
4932 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4933 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4934 BPF_MOV64_IMM(BPF_REG_0, 0),
4935 BPF_EXIT_INSN(),
4936 },
4937 .fixup_map2 = { 3 },
4938 .result = ACCEPT,
4939 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4940 },
4941 {
4942 "helper access to map: bounds check using <=, bad access",
4943 .insns = {
4944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4946 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4947 BPF_LD_MAP_FD(BPF_REG_1, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4950 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4952 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4953 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4954 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4955 BPF_MOV64_IMM(BPF_REG_0, 0),
4956 BPF_EXIT_INSN(),
4957 BPF_MOV64_IMM(BPF_REG_0, 0),
4958 BPF_EXIT_INSN(),
4959 },
4960 .fixup_map2 = { 3 },
4961 .result = REJECT,
4962 .errstr = "R1 unbounded memory access",
4963 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4964 },
4965 {
4966 "helper access to map: bounds check using s<, good access",
4967 .insns = {
4968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4973 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4975 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4976 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4977 BPF_MOV64_IMM(BPF_REG_0, 0),
4978 BPF_EXIT_INSN(),
4979 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4980 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4981 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4982 BPF_MOV64_IMM(BPF_REG_0, 0),
4983 BPF_EXIT_INSN(),
4984 },
4985 .fixup_map2 = { 3 },
4986 .result = ACCEPT,
4987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4988 },
4989 {
4990 "helper access to map: bounds check using s<, good access 2",
4991 .insns = {
4992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4994 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4995 BPF_LD_MAP_FD(BPF_REG_1, 0),
4996 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4997 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4999 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5000 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5001 BPF_MOV64_IMM(BPF_REG_0, 0),
5002 BPF_EXIT_INSN(),
5003 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5004 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5005 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5006 BPF_MOV64_IMM(BPF_REG_0, 0),
5007 BPF_EXIT_INSN(),
5008 },
5009 .fixup_map2 = { 3 },
5010 .result = ACCEPT,
5011 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5012 },
5013 {
5014 "helper access to map: bounds check using s<, bad access",
5015 .insns = {
5016 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5018 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5019 BPF_LD_MAP_FD(BPF_REG_1, 0),
5020 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5021 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5022 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5023 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5024 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
5025 BPF_MOV64_IMM(BPF_REG_0, 0),
5026 BPF_EXIT_INSN(),
5027 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
5028 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5029 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5030 BPF_MOV64_IMM(BPF_REG_0, 0),
5031 BPF_EXIT_INSN(),
5032 },
5033 .fixup_map2 = { 3 },
5034 .result = REJECT,
5035 .errstr = "R1 min value is negative",
5036 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5037 },
5038 {
5039 "helper access to map: bounds check using s<=, good access",
5040 .insns = {
5041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5043 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5044 BPF_LD_MAP_FD(BPF_REG_1, 0),
5045 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5049 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5050 BPF_MOV64_IMM(BPF_REG_0, 0),
5051 BPF_EXIT_INSN(),
5052 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5053 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5054 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5055 BPF_MOV64_IMM(BPF_REG_0, 0),
5056 BPF_EXIT_INSN(),
5057 },
5058 .fixup_map2 = { 3 },
5059 .result = ACCEPT,
5060 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5061 },
5062 {
5063 "helper access to map: bounds check using s<=, good access 2",
5064 .insns = {
5065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5067 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5068 BPF_LD_MAP_FD(BPF_REG_1, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5072 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5073 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5074 BPF_MOV64_IMM(BPF_REG_0, 0),
5075 BPF_EXIT_INSN(),
5076 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5077 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5078 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5079 BPF_MOV64_IMM(BPF_REG_0, 0),
5080 BPF_EXIT_INSN(),
5081 },
5082 .fixup_map2 = { 3 },
5083 .result = ACCEPT,
5084 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5085 },
5086 {
5087 "helper access to map: bounds check using s<=, bad access",
5088 .insns = {
5089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5091 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5092 BPF_LD_MAP_FD(BPF_REG_1, 0),
5093 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5094 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5095 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5096 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5097 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5098 BPF_MOV64_IMM(BPF_REG_0, 0),
5099 BPF_EXIT_INSN(),
5100 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5101 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5102 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5103 BPF_MOV64_IMM(BPF_REG_0, 0),
5104 BPF_EXIT_INSN(),
5105 },
5106 .fixup_map2 = { 3 },
5107 .result = REJECT,
5108 .errstr = "R1 min value is negative",
5109 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5110 },
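	/* Spill/fill tests: a map value (or map_value_or_null) pointer
	 * written to the stack and loaded back must keep its type so the
	 * reloaded pointer can still be dereferenced; unprivileged programs
	 * are rejected because storing the address leaks it to memory.
	 */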
5111 {
5112	"map element value is preserved across register spilling",
5113 .insns = {
5114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5116 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5117 BPF_LD_MAP_FD(BPF_REG_1, 0),
5118 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5120 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5121 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5123 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5124 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5125 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5126 BPF_EXIT_INSN(),
5127 },
5128 .fixup_map2 = { 3 },
5129 .errstr_unpriv = "R0 leaks addr",
5130 .result = ACCEPT,
5131 .result_unpriv = REJECT,
5132 },
5133 {
5134	"map element value or null is marked on register spilling",
5135 .insns = {
5136 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5138 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5139 BPF_LD_MAP_FD(BPF_REG_1, 0),
5140 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5141 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5142 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5143 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5145 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5146 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5147 BPF_EXIT_INSN(),
5148 },
5149 .fixup_map2 = { 3 },
5150 .errstr_unpriv = "R0 leaks addr",
5151 .result = ACCEPT,
5152 .result_unpriv = REJECT,
5153 },
5154 {
5155 "map element value store of cleared call register",
5156 .insns = {
5157 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5159 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5160 BPF_LD_MAP_FD(BPF_REG_1, 0),
5161 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5163 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5164 BPF_EXIT_INSN(),
5165 },
5166 .fixup_map2 = { 3 },
5167 .errstr_unpriv = "R1 !read_ok",
5168 .errstr = "R1 !read_ok",
5169 .result = REJECT,
5170 .result_unpriv = REJECT,
5171 },
5172 {
5173 "map element value with unaligned store",
5174 .insns = {
5175 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5177 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5178 BPF_LD_MAP_FD(BPF_REG_1, 0),
5179 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5182 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5183 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5184 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5185 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5186 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5187 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5188 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5190 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5191 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5192 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5193 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5195 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5196 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5197 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5198 BPF_EXIT_INSN(),
5199 },
5200 .fixup_map2 = { 3 },
5201	.errstr_unpriv = "R0 leaks addr",
5202	.result = ACCEPT,
5203 .result_unpriv = REJECT,
5204 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5205 },
5206 {
5207 "map element value with unaligned load",
5208 .insns = {
5209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5211 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5212 BPF_LD_MAP_FD(BPF_REG_1, 0),
5213 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5215 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5216 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5218 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5219 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5220 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5221 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5222 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5224 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5225 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5226 BPF_EXIT_INSN(),
5227 },
5228 .fixup_map2 = { 3 },
5229	.errstr_unpriv = "R0 leaks addr",
5230	.result = ACCEPT,
5231 .result_unpriv = REJECT,
5232 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5233 },
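	/* "illegal alu op" tests: bitwise AND, 32-bit ALU, division, a byte
	 * swap and a misused XADD on a map value pointer are either rejected
	 * outright or turn the register into a scalar ('inv') that can no
	 * longer be dereferenced.
	 */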
5234 {
5235 "map element value illegal alu op, 1",
5236 .insns = {
5237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5239 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5240 BPF_LD_MAP_FD(BPF_REG_1, 0),
5241 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5242 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5243 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5244 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5245 BPF_EXIT_INSN(),
5246 },
5247 .fixup_map2 = { 3 },
5248	.errstr = "R0 bitwise operator &= on pointer",
5249	.result = REJECT,
5250	},
5251 {
5252 "map element value illegal alu op, 2",
5253 .insns = {
5254 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5256 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5257 BPF_LD_MAP_FD(BPF_REG_1, 0),
5258 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5259 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5260 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5261 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5262 BPF_EXIT_INSN(),
5263 },
5264 .fixup_map2 = { 3 },
5265	.errstr = "R0 32-bit pointer arithmetic prohibited",
5266	.result = REJECT,
5267	},
5268 {
5269 "map element value illegal alu op, 3",
5270 .insns = {
5271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5273 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5274 BPF_LD_MAP_FD(BPF_REG_1, 0),
5275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5277 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5278 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5279 BPF_EXIT_INSN(),
5280 },
5281 .fixup_map2 = { 3 },
5282	.errstr = "R0 pointer arithmetic with /= operator",
5283	.result = REJECT,
5284	},
5285 {
5286 "map element value illegal alu op, 4",
5287 .insns = {
5288 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5289 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5290 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5291 BPF_LD_MAP_FD(BPF_REG_1, 0),
5292 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5293 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5294 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5295 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5296 BPF_EXIT_INSN(),
5297 },
5298 .fixup_map2 = { 3 },
5299 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5300 .errstr = "invalid mem access 'inv'",
5301 .result = REJECT,
5302 .result_unpriv = REJECT,
5303 },
5304 {
5305 "map element value illegal alu op, 5",
5306 .insns = {
5307 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5308 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5309 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5310 BPF_LD_MAP_FD(BPF_REG_1, 0),
5311 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5312 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5313 BPF_MOV64_IMM(BPF_REG_3, 4096),
5314 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5316 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5317 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5318 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5319 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5320 BPF_EXIT_INSN(),
5321 },
5322 .fixup_map2 = { 3 },
5323	.errstr = "R0 invalid mem access 'inv'",
5324 .result = REJECT,
5325	},
5326 {
5327 "map element value is preserved across register spilling",
5328	.insns = {
5329 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5331 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5332 BPF_LD_MAP_FD(BPF_REG_1, 0),
5333 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5334 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5335 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5336 offsetof(struct test_val, foo)),
5337 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5340 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5341 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5342 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5343 BPF_EXIT_INSN(),
5344 },
5345 .fixup_map2 = { 3 },
5346	.errstr_unpriv = "R0 leaks addr",
5347	.result = ACCEPT,
5348 .result_unpriv = REJECT,
5349	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5350	},
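	/* "helper access to variable memory" tests: the access size in R2 is
	 * not a verifier-known constant but comes from the stack or a map,
	 * so it must be provably bounded (via an AND mask or conditional
	 * jumps) and the referenced stack bytes must be initialized.
	 */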
5351	{
5352 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5353 .insns = {
5354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5356 BPF_MOV64_IMM(BPF_REG_0, 0),
5357 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5358 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5359 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5361 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5363 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5364 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5365 BPF_MOV64_IMM(BPF_REG_2, 16),
5366 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5367 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5368 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5369 BPF_MOV64_IMM(BPF_REG_4, 0),
5370 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5371 BPF_MOV64_IMM(BPF_REG_3, 0),
5372 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5373 BPF_MOV64_IMM(BPF_REG_0, 0),
5374 BPF_EXIT_INSN(),
5375 },
5376 .result = ACCEPT,
5377 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5378 },
5379 {
5380 "helper access to variable memory: stack, bitwise AND, zero included",
5381 .insns = {
5382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5384 BPF_MOV64_IMM(BPF_REG_2, 16),
5385 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5386 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5387 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5388 BPF_MOV64_IMM(BPF_REG_3, 0),
5389 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5390 BPF_EXIT_INSN(),
5391 },
5392	.errstr = "invalid indirect read from stack off -64+0 size 64",
5393	.result = REJECT,
5394 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5395 },
5396 {
5397 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5398 .insns = {
5399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5400 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5401 BPF_MOV64_IMM(BPF_REG_2, 16),
5402 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5403 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5404 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5405 BPF_MOV64_IMM(BPF_REG_4, 0),
5406 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5407 BPF_MOV64_IMM(BPF_REG_3, 0),
5408 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5409 BPF_MOV64_IMM(BPF_REG_0, 0),
5410 BPF_EXIT_INSN(),
5411 },
5412 .errstr = "invalid stack type R1 off=-64 access_size=65",
5413 .result = REJECT,
5414 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5415 },
5416 {
5417 "helper access to variable memory: stack, JMP, correct bounds",
5418 .insns = {
5419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5421 BPF_MOV64_IMM(BPF_REG_0, 0),
5422 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5423 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5424 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5425 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5426 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5427 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5428 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5429 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5430 BPF_MOV64_IMM(BPF_REG_2, 16),
5431 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5432 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5433 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5434 BPF_MOV64_IMM(BPF_REG_4, 0),
5435 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5436 BPF_MOV64_IMM(BPF_REG_3, 0),
5437 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5438 BPF_MOV64_IMM(BPF_REG_0, 0),
5439 BPF_EXIT_INSN(),
5440 },
5441 .result = ACCEPT,
5442 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5443 },
5444 {
5445 "helper access to variable memory: stack, JMP (signed), correct bounds",
5446 .insns = {
5447 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5449 BPF_MOV64_IMM(BPF_REG_0, 0),
5450 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5451 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5452 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5453 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5454 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5455 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5456 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5457 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5458 BPF_MOV64_IMM(BPF_REG_2, 16),
5459 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5461 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5462 BPF_MOV64_IMM(BPF_REG_4, 0),
5463 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5464 BPF_MOV64_IMM(BPF_REG_3, 0),
5465 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5466 BPF_MOV64_IMM(BPF_REG_0, 0),
5467 BPF_EXIT_INSN(),
5468 },
5469 .result = ACCEPT,
5470 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5471 },
5472 {
5473 "helper access to variable memory: stack, JMP, bounds + offset",
5474 .insns = {
5475 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5476 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5477 BPF_MOV64_IMM(BPF_REG_2, 16),
5478 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5479 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5480 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5481 BPF_MOV64_IMM(BPF_REG_4, 0),
5482 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5484 BPF_MOV64_IMM(BPF_REG_3, 0),
5485 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5486 BPF_MOV64_IMM(BPF_REG_0, 0),
5487 BPF_EXIT_INSN(),
5488 },
5489 .errstr = "invalid stack type R1 off=-64 access_size=65",
5490 .result = REJECT,
5491 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5492 },
5493 {
5494 "helper access to variable memory: stack, JMP, wrong max",
5495 .insns = {
5496 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5498 BPF_MOV64_IMM(BPF_REG_2, 16),
5499 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5500 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5501 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5502 BPF_MOV64_IMM(BPF_REG_4, 0),
5503 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5504 BPF_MOV64_IMM(BPF_REG_3, 0),
5505 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5506 BPF_MOV64_IMM(BPF_REG_0, 0),
5507 BPF_EXIT_INSN(),
5508 },
5509 .errstr = "invalid stack type R1 off=-64 access_size=65",
5510 .result = REJECT,
5511 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5512 },
5513 {
5514 "helper access to variable memory: stack, JMP, no max check",
5515 .insns = {
5516 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5518 BPF_MOV64_IMM(BPF_REG_2, 16),
5519 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5520 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5521 BPF_MOV64_IMM(BPF_REG_4, 0),
5522 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5523 BPF_MOV64_IMM(BPF_REG_3, 0),
5524 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5525 BPF_MOV64_IMM(BPF_REG_0, 0),
5526 BPF_EXIT_INSN(),
5527 },
5528	/* because max wasn't checked, signed min is negative */
5529 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5530	.result = REJECT,
5531 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5532 },
5533 {
5534 "helper access to variable memory: stack, JMP, no min check",
5535 .insns = {
5536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5538 BPF_MOV64_IMM(BPF_REG_2, 16),
5539 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5540 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5541 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5542 BPF_MOV64_IMM(BPF_REG_3, 0),
5543 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5544 BPF_MOV64_IMM(BPF_REG_0, 0),
5545 BPF_EXIT_INSN(),
5546 },
5547	.errstr = "invalid indirect read from stack off -64+0 size 64",
5548	.result = REJECT,
5549 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5550 },
5551 {
5552 "helper access to variable memory: stack, JMP (signed), no min check",
5553 .insns = {
5554 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5556 BPF_MOV64_IMM(BPF_REG_2, 16),
5557 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5558 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5559 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5560 BPF_MOV64_IMM(BPF_REG_3, 0),
5561 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5562 BPF_MOV64_IMM(BPF_REG_0, 0),
5563 BPF_EXIT_INSN(),
5564 },
5565 .errstr = "R2 min value is negative",
5566 .result = REJECT,
5567 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5568 },
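	/* Variable-memory accesses into a map value: the bound checked with
	 * JSGT must not allow sizes beyond sizeof(struct test_val), minus
	 * any constant adjustment already applied to the pointer.
	 */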
5569 {
5570 "helper access to variable memory: map, JMP, correct bounds",
5571 .insns = {
5572 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5573 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5574 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5575 BPF_LD_MAP_FD(BPF_REG_1, 0),
5576 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5577 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5578 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5579 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5580 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5581 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5582 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5583 sizeof(struct test_val), 4),
5584 BPF_MOV64_IMM(BPF_REG_4, 0),
5585	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5586	BPF_MOV64_IMM(BPF_REG_3, 0),
5587 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5588 BPF_MOV64_IMM(BPF_REG_0, 0),
5589 BPF_EXIT_INSN(),
5590 },
5591 .fixup_map2 = { 3 },
5592 .result = ACCEPT,
5593 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5594 },
5595 {
5596 "helper access to variable memory: map, JMP, wrong max",
5597 .insns = {
5598 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5599 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5600 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5601 BPF_LD_MAP_FD(BPF_REG_1, 0),
5602 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5603 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5605 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5606 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5607 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5608 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5609 sizeof(struct test_val) + 1, 4),
5610 BPF_MOV64_IMM(BPF_REG_4, 0),
5611	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5612	BPF_MOV64_IMM(BPF_REG_3, 0),
5613 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5614 BPF_MOV64_IMM(BPF_REG_0, 0),
5615 BPF_EXIT_INSN(),
5616 },
5617 .fixup_map2 = { 3 },
5618 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5619 .result = REJECT,
5620 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5621 },
5622 {
5623 "helper access to variable memory: map adjusted, JMP, correct bounds",
5624 .insns = {
5625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5627 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5628 BPF_LD_MAP_FD(BPF_REG_1, 0),
5629 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5631 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5633 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5634 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5635 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5636 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5637 sizeof(struct test_val) - 20, 4),
5638 BPF_MOV64_IMM(BPF_REG_4, 0),
5639	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5640	BPF_MOV64_IMM(BPF_REG_3, 0),
5641 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5642 BPF_MOV64_IMM(BPF_REG_0, 0),
5643 BPF_EXIT_INSN(),
5644 },
5645 .fixup_map2 = { 3 },
5646 .result = ACCEPT,
5647 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5648 },
5649 {
5650 "helper access to variable memory: map adjusted, JMP, wrong max",
5651 .insns = {
5652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5654 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5655 BPF_LD_MAP_FD(BPF_REG_1, 0),
5656 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5658 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5660 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5661 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5662 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5663 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5664 sizeof(struct test_val) - 19, 4),
5665 BPF_MOV64_IMM(BPF_REG_4, 0),
5666	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5667	BPF_MOV64_IMM(BPF_REG_3, 0),
5668 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5669 BPF_MOV64_IMM(BPF_REG_0, 0),
5670 BPF_EXIT_INSN(),
5671 },
5672 .fixup_map2 = { 3 },
5673 .errstr = "R1 min value is outside of the array range",
5674 .result = REJECT,
5675 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5676 },
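	/* The bpf_csum_diff() tests below exercise ARG_PTR_TO_MEM_OR_NULL:
	 * a NULL pointer is acceptable only together with size 0, while a
	 * non-NULL stack, map or packet pointer may be paired with a size
	 * that is possibly 0.
	 */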
5677 {
5678	"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5679	.insns = {
5680 BPF_MOV64_IMM(BPF_REG_1, 0),
5681 BPF_MOV64_IMM(BPF_REG_2, 0),
5682 BPF_MOV64_IMM(BPF_REG_3, 0),
5683 BPF_MOV64_IMM(BPF_REG_4, 0),
5684 BPF_MOV64_IMM(BPF_REG_5, 0),
5685 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5686 BPF_EXIT_INSN(),
5687 },
5688 .result = ACCEPT,
5689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5690 },
5691 {
5692	"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5693	.insns = {
5694 BPF_MOV64_IMM(BPF_REG_1, 0),
5695	BPF_MOV64_IMM(BPF_REG_2, 1),
5696	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5697 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5698	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5699 BPF_MOV64_IMM(BPF_REG_3, 0),
5700 BPF_MOV64_IMM(BPF_REG_4, 0),
5701 BPF_MOV64_IMM(BPF_REG_5, 0),
5702 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5703 BPF_EXIT_INSN(),
5704 },
5705	.errstr = "R1 type=inv expected=fp",
5706	.result = REJECT,
5707 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5708 },
5709 {
5710	"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5711	.insns = {
5712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5713 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5714 BPF_MOV64_IMM(BPF_REG_2, 0),
5715 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5716 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5717 BPF_MOV64_IMM(BPF_REG_3, 0),
5718 BPF_MOV64_IMM(BPF_REG_4, 0),
5719 BPF_MOV64_IMM(BPF_REG_5, 0),
5720 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5721 BPF_EXIT_INSN(),
5722 },
5723	.result = ACCEPT,
5724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5725 },
5726 {
5727	"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5728	.insns = {
5729 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5730 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5731 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5732 BPF_LD_MAP_FD(BPF_REG_1, 0),
5733 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5734 BPF_FUNC_map_lookup_elem),
5735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5737 BPF_MOV64_IMM(BPF_REG_2, 0),
5738 BPF_MOV64_IMM(BPF_REG_3, 0),
5739 BPF_MOV64_IMM(BPF_REG_4, 0),
5740 BPF_MOV64_IMM(BPF_REG_5, 0),
5741 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5742 BPF_EXIT_INSN(),
5743 },
5744 .fixup_map1 = { 3 },
5745 .result = ACCEPT,
5746 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5747 },
5748 {
5749	"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5750	.insns = {
5751 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5752 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5754 BPF_LD_MAP_FD(BPF_REG_1, 0),
5755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5756 BPF_FUNC_map_lookup_elem),
5757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5758 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5759 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
5760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5762 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5763 BPF_MOV64_IMM(BPF_REG_3, 0),
5764 BPF_MOV64_IMM(BPF_REG_4, 0),
5765 BPF_MOV64_IMM(BPF_REG_5, 0),
5766 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5767 BPF_EXIT_INSN(),
5768 },
5769 .fixup_map1 = { 3 },
5770 .result = ACCEPT,
5771 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5772 },
5773 {
5774	"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5775	.insns = {
5776 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5779 BPF_LD_MAP_FD(BPF_REG_1, 0),
5780 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5781 BPF_FUNC_map_lookup_elem),
5782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5784 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5785 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5786 BPF_MOV64_IMM(BPF_REG_3, 0),
5787 BPF_MOV64_IMM(BPF_REG_4, 0),
5788 BPF_MOV64_IMM(BPF_REG_5, 0),
5789 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5790 BPF_EXIT_INSN(),
5791 },
5792 .fixup_map1 = { 3 },
5793 .result = ACCEPT,
5794 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5795 },
5796 {
5797	"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
5798	.insns = {
5799 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5800 offsetof(struct __sk_buff, data)),
5801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5802 offsetof(struct __sk_buff, data_end)),
5803 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
5804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5805 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
5806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5807 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
5808 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5809 BPF_MOV64_IMM(BPF_REG_3, 0),
5810 BPF_MOV64_IMM(BPF_REG_4, 0),
5811 BPF_MOV64_IMM(BPF_REG_5, 0),
5812 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5813 BPF_EXIT_INSN(),
5814 },
5815 .result = ACCEPT,
5816	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
5817	.retval = 0 /* csum_diff of 64-byte packet */,
5818	},
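	/* The bpf_probe_read() counterparts: its memory argument is not an
	 * ..._OR_NULL type, so a NULL pointer is rejected even with size 0,
	 * while a non-NULL stack or map pointer with a (possibly) zero size
	 * is accepted.
	 */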
5819 {
5820	"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5821 .insns = {
5822 BPF_MOV64_IMM(BPF_REG_1, 0),
5823 BPF_MOV64_IMM(BPF_REG_2, 0),
5824 BPF_MOV64_IMM(BPF_REG_3, 0),
5825 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5826 BPF_EXIT_INSN(),
5827 },
5828 .errstr = "R1 type=inv expected=fp",
5829 .result = REJECT,
5830 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5831 },
5832 {
5833 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5834 .insns = {
5835 BPF_MOV64_IMM(BPF_REG_1, 0),
5836 BPF_MOV64_IMM(BPF_REG_2, 1),
5837 BPF_MOV64_IMM(BPF_REG_3, 0),
5838 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5839 BPF_EXIT_INSN(),
5840 },
5841 .errstr = "R1 type=inv expected=fp",
5842 .result = REJECT,
5843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5844 },
5845 {
5846 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5847 .insns = {
5848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5850 BPF_MOV64_IMM(BPF_REG_2, 0),
5851 BPF_MOV64_IMM(BPF_REG_3, 0),
5852 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5853 BPF_EXIT_INSN(),
5854 },
5855 .result = ACCEPT,
5856 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5857 },
5858 {
5859 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5860 .insns = {
5861 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5862 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5863 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5864 BPF_LD_MAP_FD(BPF_REG_1, 0),
5865 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5868 BPF_MOV64_IMM(BPF_REG_2, 0),
5869 BPF_MOV64_IMM(BPF_REG_3, 0),
5870 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5871 BPF_EXIT_INSN(),
5872 },
5873 .fixup_map1 = { 3 },
5874 .result = ACCEPT,
5875 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5876 },
5877 {
5878 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5879 .insns = {
5880 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5881 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5883 BPF_LD_MAP_FD(BPF_REG_1, 0),
5884 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5886 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5887 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5888 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5889 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5890 BPF_MOV64_IMM(BPF_REG_3, 0),
5891 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5892 BPF_EXIT_INSN(),
5893 },
5894 .fixup_map1 = { 3 },
5895 .result = ACCEPT,
5896 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5897 },
5898 {
5899 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5900 .insns = {
5901 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5902 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5904 BPF_LD_MAP_FD(BPF_REG_1, 0),
5905 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5908 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5909 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
5910 BPF_MOV64_IMM(BPF_REG_3, 0),
5911 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5912 BPF_EXIT_INSN(),
5913 },
5914 .fixup_map1 = { 3 },
5915 .result = ACCEPT,
5916 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5917 },
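	/* A variable-sized stack buffer handed to a helper must be fully
	 * initialized: the "leak" test below skips the store at fp-32 and
	 * is rejected, while the "no leak" variant initializes all 64
	 * bytes and is accepted.
	 */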
5918 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08005919 "helper access to variable memory: 8 bytes leak",
5920 .insns = {
5921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5923 BPF_MOV64_IMM(BPF_REG_0, 0),
5924 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5925 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5926 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5927 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5928 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5929 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5930 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08005931 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01005932 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5933 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005934 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5936 BPF_MOV64_IMM(BPF_REG_3, 0),
5937 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5938 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5939 BPF_EXIT_INSN(),
5940 },
5941 .errstr = "invalid indirect read from stack off -64+32 size 64",
5942 .result = REJECT,
5943 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5944 },
5945 {
5946 "helper access to variable memory: 8 bytes no leak (init memory)",
5947 .insns = {
5948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5949 BPF_MOV64_IMM(BPF_REG_0, 0),
5950 BPF_MOV64_IMM(BPF_REG_0, 0),
5951 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5952 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5953 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5954 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5955 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5956 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5957 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5958 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5960 BPF_MOV64_IMM(BPF_REG_2, 0),
5961 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5963 BPF_MOV64_IMM(BPF_REG_3, 0),
5964 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5965 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5966 BPF_EXIT_INSN(),
5967 },
5968 .result = ACCEPT,
5969 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5970 },
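	/* The next two tests derive an array index from a map value via
	 * AND with a negative constant resp. modulo arithmetic; the result
	 * can still exceed the array range and must be rejected.
	 */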
Josef Bacik29200c12017-02-03 16:25:23 -05005971 {
5972 "invalid and of negative number",
5973 .insns = {
5974 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5975 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5977 BPF_LD_MAP_FD(BPF_REG_1, 0),
5978 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5979 BPF_FUNC_map_lookup_elem),
5980 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Edward Creef65b1842017-08-07 15:27:12 +01005981 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik29200c12017-02-03 16:25:23 -05005982 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5983 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5984 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5985 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5986 offsetof(struct test_val, foo)),
5987 BPF_EXIT_INSN(),
5988 },
5989 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005990 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05005991 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005992 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik29200c12017-02-03 16:25:23 -05005993 },
5994 {
5995 "invalid range check",
5996 .insns = {
5997 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6000 BPF_LD_MAP_FD(BPF_REG_1, 0),
6001 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6002 BPF_FUNC_map_lookup_elem),
6003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
6004 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6005 BPF_MOV64_IMM(BPF_REG_9, 1),
6006 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
6007 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
6008 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
6009 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
6010 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
6011 BPF_MOV32_IMM(BPF_REG_3, 1),
6012 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
6013 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
6014 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
6015 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
6016 BPF_MOV64_REG(BPF_REG_0, 0),
6017 BPF_EXIT_INSN(),
6018 },
6019 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006020 .errstr = "R0 max value is outside of the array range",
Josef Bacik29200c12017-02-03 16:25:23 -05006021 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006022 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006023 },
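	/* Map-in-map: a lookup in the outer map returns an inner map
	 * pointer. A second lookup through it is allowed, but pointer
	 * arithmetic on CONST_PTR_TO_MAP and skipping the NULL check
	 * are rejected.
	 */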
6024 {
6025 "map in map access",
6026 .insns = {
6027 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6030 BPF_LD_MAP_FD(BPF_REG_1, 0),
6031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6032 BPF_FUNC_map_lookup_elem),
6033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6034 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6035 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6039 BPF_FUNC_map_lookup_elem),
6040 BPF_MOV64_REG(BPF_REG_0, 0),
6041 BPF_EXIT_INSN(),
6042 },
6043 .fixup_map_in_map = { 3 },
6044 .result = ACCEPT,
6045 },
6046 {
6047 "invalid inner map pointer",
6048 .insns = {
6049 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6050 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6052 BPF_LD_MAP_FD(BPF_REG_1, 0),
6053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6054 BPF_FUNC_map_lookup_elem),
6055 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6056 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6061 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6062 BPF_FUNC_map_lookup_elem),
6063 BPF_MOV64_REG(BPF_REG_0, 0),
6064 BPF_EXIT_INSN(),
6065 },
6066 .fixup_map_in_map = { 3 },
Alexei Starovoitov82abbf82017-12-18 20:15:20 -08006067 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006068 .result = REJECT,
6069 },
6070 {
6071 "forgot null checking on the inner map pointer",
6072 .insns = {
6073 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6074 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6076 BPF_LD_MAP_FD(BPF_REG_1, 0),
6077 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6078 BPF_FUNC_map_lookup_elem),
6079 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6080 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6081 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6082 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6083 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6084 BPF_FUNC_map_lookup_elem),
6085 BPF_MOV64_REG(BPF_REG_0, 0),
6086 BPF_EXIT_INSN(),
6087 },
6088 .fixup_map_in_map = { 3 },
6089 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6090 .result = REJECT,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006091 },
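	/* BPF_LD_ABS uses a special calling convention that clobbers the
	 * caller-saved registers R1-R5, so reading any of them afterwards
	 * must fail with "!read_ok"; R6 (skb) and R7+ are preserved. The
	 * last test of this group reloads skb data via R6 around a helper
	 * that may move the packet.
	 */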
6092 {
6093 "ld_abs: check calling conv, r1",
6094 .insns = {
6095 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6096 BPF_MOV64_IMM(BPF_REG_1, 0),
6097 BPF_LD_ABS(BPF_W, -0x200000),
6098 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6099 BPF_EXIT_INSN(),
6100 },
6101 .errstr = "R1 !read_ok",
6102 .result = REJECT,
6103 },
6104 {
6105 "ld_abs: check calling conv, r2",
6106 .insns = {
6107 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6108 BPF_MOV64_IMM(BPF_REG_2, 0),
6109 BPF_LD_ABS(BPF_W, -0x200000),
6110 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6111 BPF_EXIT_INSN(),
6112 },
6113 .errstr = "R2 !read_ok",
6114 .result = REJECT,
6115 },
6116 {
6117 "ld_abs: check calling conv, r3",
6118 .insns = {
6119 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6120 BPF_MOV64_IMM(BPF_REG_3, 0),
6121 BPF_LD_ABS(BPF_W, -0x200000),
6122 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6123 BPF_EXIT_INSN(),
6124 },
6125 .errstr = "R3 !read_ok",
6126 .result = REJECT,
6127 },
6128 {
6129 "ld_abs: check calling conv, r4",
6130 .insns = {
6131 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6132 BPF_MOV64_IMM(BPF_REG_4, 0),
6133 BPF_LD_ABS(BPF_W, -0x200000),
6134 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6135 BPF_EXIT_INSN(),
6136 },
6137 .errstr = "R4 !read_ok",
6138 .result = REJECT,
6139 },
6140 {
6141 "ld_abs: check calling conv, r5",
6142 .insns = {
6143 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6144 BPF_MOV64_IMM(BPF_REG_5, 0),
6145 BPF_LD_ABS(BPF_W, -0x200000),
6146 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6147 BPF_EXIT_INSN(),
6148 },
6149 .errstr = "R5 !read_ok",
6150 .result = REJECT,
6151 },
6152 {
6153 "ld_abs: check calling conv, r7",
6154 .insns = {
6155 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6156 BPF_MOV64_IMM(BPF_REG_7, 0),
6157 BPF_LD_ABS(BPF_W, -0x200000),
6158 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6159 BPF_EXIT_INSN(),
6160 },
6161 .result = ACCEPT,
6162 },
6163 {
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006164 "ld_abs: tests on r6 and skb data reload helper",
6165 .insns = {
6166 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6167 BPF_LD_ABS(BPF_B, 0),
6168 BPF_LD_ABS(BPF_H, 0),
6169 BPF_LD_ABS(BPF_W, 0),
6170 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6171 BPF_MOV64_IMM(BPF_REG_6, 0),
6172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6173 BPF_MOV64_IMM(BPF_REG_2, 1),
6174 BPF_MOV64_IMM(BPF_REG_3, 2),
6175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6176 BPF_FUNC_skb_vlan_push),
6177 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6178 BPF_LD_ABS(BPF_B, 0),
6179 BPF_LD_ABS(BPF_H, 0),
6180 BPF_LD_ABS(BPF_W, 0),
6181 BPF_MOV64_IMM(BPF_REG_0, 42),
6182 BPF_EXIT_INSN(),
6183 },
6184 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6185 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006186 .retval = 42 /* ultimate return value */,
Daniel Borkmann87ab8192017-12-14 21:07:27 +01006187 },
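	/* Same calling-convention checks for BPF_LD_IND; only the R7
	 * variant is valid and returns the value 1 loaded into R7.
	 */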
6188 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006189 "ld_ind: check calling conv, r1",
6190 .insns = {
6191 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6192 BPF_MOV64_IMM(BPF_REG_1, 1),
6193 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6194 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6195 BPF_EXIT_INSN(),
6196 },
6197 .errstr = "R1 !read_ok",
6198 .result = REJECT,
6199 },
6200 {
6201 "ld_ind: check calling conv, r2",
6202 .insns = {
6203 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6204 BPF_MOV64_IMM(BPF_REG_2, 1),
6205 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6206 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6207 BPF_EXIT_INSN(),
6208 },
6209 .errstr = "R2 !read_ok",
6210 .result = REJECT,
6211 },
6212 {
6213 "ld_ind: check calling conv, r3",
6214 .insns = {
6215 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6216 BPF_MOV64_IMM(BPF_REG_3, 1),
6217 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6218 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6219 BPF_EXIT_INSN(),
6220 },
6221 .errstr = "R3 !read_ok",
6222 .result = REJECT,
6223 },
6224 {
6225 "ld_ind: check calling conv, r4",
6226 .insns = {
6227 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6228 BPF_MOV64_IMM(BPF_REG_4, 1),
6229 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6230 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6231 BPF_EXIT_INSN(),
6232 },
6233 .errstr = "R4 !read_ok",
6234 .result = REJECT,
6235 },
6236 {
6237 "ld_ind: check calling conv, r5",
6238 .insns = {
6239 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6240 BPF_MOV64_IMM(BPF_REG_5, 1),
6241 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6242 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6243 BPF_EXIT_INSN(),
6244 },
6245 .errstr = "R5 !read_ok",
6246 .result = REJECT,
6247 },
6248 {
6249 "ld_ind: check calling conv, r7",
6250 .insns = {
6251 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6252 BPF_MOV64_IMM(BPF_REG_7, 1),
6253 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6254 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6255 BPF_EXIT_INSN(),
6256 },
6257 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08006258 .retval = 1,
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006259 },
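	/* Narrow loads of bpf_perf_event_data->sample_period are permitted
	 * for perf event programs; the byte-order conditionals pick the
	 * offset of the same low-order bytes on big endian. The half loads
	 * of skb->data and skb->tc_classid below are invalid context
	 * accesses.
	 */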
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006260 {
6261 "check bpf_perf_event_data->sample_period byte load permitted",
6262 .insns = {
6263 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006264#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006265 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6266 offsetof(struct bpf_perf_event_data, sample_period)),
6267#else
6268 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6269 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6270#endif
6271 BPF_EXIT_INSN(),
6272 },
6273 .result = ACCEPT,
6274 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6275 },
6276 {
6277 "check bpf_perf_event_data->sample_period half load permitted",
6278 .insns = {
6279 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006280#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006281 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6282 offsetof(struct bpf_perf_event_data, sample_period)),
6283#else
6284 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6285 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6286#endif
6287 BPF_EXIT_INSN(),
6288 },
6289 .result = ACCEPT,
6290 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6291 },
6292 {
6293 "check bpf_perf_event_data->sample_period word load permitted",
6294 .insns = {
6295 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006296#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006297 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6298 offsetof(struct bpf_perf_event_data, sample_period)),
6299#else
6300 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6301 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6302#endif
6303 BPF_EXIT_INSN(),
6304 },
6305 .result = ACCEPT,
6306 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6307 },
6308 {
6309 "check bpf_perf_event_data->sample_period dword load permitted",
6310 .insns = {
6311 BPF_MOV64_IMM(BPF_REG_0, 0),
6312 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6313 offsetof(struct bpf_perf_event_data, sample_period)),
6314 BPF_EXIT_INSN(),
6315 },
6316 .result = ACCEPT,
6317 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6318 },
6319 {
6320 "check skb->data half load not permitted",
6321 .insns = {
6322 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006323#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006324 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6325 offsetof(struct __sk_buff, data)),
6326#else
6327 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6328 offsetof(struct __sk_buff, data) + 2),
6329#endif
6330 BPF_EXIT_INSN(),
6331 },
6332 .result = REJECT,
6333 .errstr = "invalid bpf_context access",
6334 },
6335 {
6336 "check skb->tc_classid half load not permitted for lwt prog",
6337 .insns = {
6338 BPF_MOV64_IMM(BPF_REG_0, 0),
Daniel Borkmann2c460622017-08-04 22:24:41 +02006339#if __BYTE_ORDER == __LITTLE_ENDIAN
Yonghong Song18f3d6b2017-06-13 15:52:14 -07006340 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6341 offsetof(struct __sk_buff, tc_classid)),
6342#else
6343 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6344 offsetof(struct __sk_buff, tc_classid) + 2),
6345#endif
6346 BPF_EXIT_INSN(),
6347 },
6348 .result = REJECT,
6349 .errstr = "invalid bpf_context access",
6350 .prog_type = BPF_PROG_TYPE_LWT_IN,
6351 },
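	/* Bounds checks mixing signed and unsigned comparisons: an
	 * unsigned JGT bound combined with a signed JSGT bound on a
	 * possibly negative map value leaves the minimum unbounded, so
	 * most variants must be rejected; variants 4, 7 and 9 pin both
	 * bounds and are accepted.
	 */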
Edward Creeb712296a2017-07-21 00:00:24 +02006352 {
6353 "bounds checks mixing signed and unsigned, positive bounds",
6354 .insns = {
6355 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6356 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6358 BPF_LD_MAP_FD(BPF_REG_1, 0),
6359 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6360 BPF_FUNC_map_lookup_elem),
6361 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6362 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6363 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6364 BPF_MOV64_IMM(BPF_REG_2, 2),
6365 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6366 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6367 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6368 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6369 BPF_MOV64_IMM(BPF_REG_0, 0),
6370 BPF_EXIT_INSN(),
6371 },
6372 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006373 .errstr = "unbounded min value",
Edward Creeb712296a2017-07-21 00:00:24 +02006374 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02006375 },
6376 {
6377 "bounds checks mixing signed and unsigned",
6378 .insns = {
6379 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6380 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6381 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6382 BPF_LD_MAP_FD(BPF_REG_1, 0),
6383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6384 BPF_FUNC_map_lookup_elem),
6385 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6386 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6387 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6388 BPF_MOV64_IMM(BPF_REG_2, -1),
6389 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6390 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6391 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6392 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6393 BPF_MOV64_IMM(BPF_REG_0, 0),
6394 BPF_EXIT_INSN(),
6395 },
6396 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006397 .errstr = "unbounded min value",
Edward Creeb712296a2017-07-21 00:00:24 +02006398 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02006399 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006400 {
6401 "bounds checks mixing signed and unsigned, variant 2",
6402 .insns = {
6403 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6404 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6406 BPF_LD_MAP_FD(BPF_REG_1, 0),
6407 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6408 BPF_FUNC_map_lookup_elem),
6409 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6410 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6411 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6412 BPF_MOV64_IMM(BPF_REG_2, -1),
6413 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6414 BPF_MOV64_IMM(BPF_REG_8, 0),
6415 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6416 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6417 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6418 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6419 BPF_MOV64_IMM(BPF_REG_0, 0),
6420 BPF_EXIT_INSN(),
6421 },
6422 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006423 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006424 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006425 },
6426 {
6427 "bounds checks mixing signed and unsigned, variant 3",
6428 .insns = {
6429 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6430 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6431 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6432 BPF_LD_MAP_FD(BPF_REG_1, 0),
6433 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6434 BPF_FUNC_map_lookup_elem),
6435 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6436 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6437 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6438 BPF_MOV64_IMM(BPF_REG_2, -1),
6439 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6440 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6441 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6442 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6443 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6444 BPF_MOV64_IMM(BPF_REG_0, 0),
6445 BPF_EXIT_INSN(),
6446 },
6447 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006448 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006449 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006450 },
6451 {
6452 "bounds checks mixing signed and unsigned, variant 4",
6453 .insns = {
6454 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6455 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6457 BPF_LD_MAP_FD(BPF_REG_1, 0),
6458 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6459 BPF_FUNC_map_lookup_elem),
6460 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6461 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6462 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6463 BPF_MOV64_IMM(BPF_REG_2, 1),
6464 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6465 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6466 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6467 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6468 BPF_MOV64_IMM(BPF_REG_0, 0),
6469 BPF_EXIT_INSN(),
6470 },
6471 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006472 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006473 },
6474 {
6475 "bounds checks mixing signed and unsigned, variant 5",
6476 .insns = {
6477 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6478 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6480 BPF_LD_MAP_FD(BPF_REG_1, 0),
6481 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6482 BPF_FUNC_map_lookup_elem),
6483 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6484 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6485 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6486 BPF_MOV64_IMM(BPF_REG_2, -1),
6487 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6488 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6490 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6491 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6492 BPF_MOV64_IMM(BPF_REG_0, 0),
6493 BPF_EXIT_INSN(),
6494 },
6495 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006496 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006497 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006498 },
6499 {
6500 "bounds checks mixing signed and unsigned, variant 6",
6501 .insns = {
6502 BPF_MOV64_IMM(BPF_REG_2, 0),
6503 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6506 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6507 BPF_MOV64_IMM(BPF_REG_6, -1),
6508 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6509 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6511 BPF_MOV64_IMM(BPF_REG_5, 0),
6512 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6514 BPF_FUNC_skb_load_bytes),
6515 BPF_MOV64_IMM(BPF_REG_0, 0),
6516 BPF_EXIT_INSN(),
6517 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006518 .errstr = "R4 min value is negative, either use unsigned",
6519 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006520 },
6521 {
6522 "bounds checks mixing signed and unsigned, variant 7",
6523 .insns = {
6524 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6525 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6526 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6527 BPF_LD_MAP_FD(BPF_REG_1, 0),
6528 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6529 BPF_FUNC_map_lookup_elem),
6530 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6531 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6532 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6533 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6534 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6535 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6536 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6537 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6538 BPF_MOV64_IMM(BPF_REG_0, 0),
6539 BPF_EXIT_INSN(),
6540 },
6541 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006542 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006543 },
6544 {
6545 "bounds checks mixing signed and unsigned, variant 8",
6546 .insns = {
6547 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6550 BPF_LD_MAP_FD(BPF_REG_1, 0),
6551 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6552 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02006553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6554 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6555 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6556 BPF_MOV64_IMM(BPF_REG_2, -1),
6557 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6558 BPF_MOV64_IMM(BPF_REG_0, 0),
6559 BPF_EXIT_INSN(),
6560 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6561 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6562 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6563 BPF_MOV64_IMM(BPF_REG_0, 0),
6564 BPF_EXIT_INSN(),
6565 },
6566 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006567 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006568 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006569 },
6570 {
Edward Creef65b1842017-08-07 15:27:12 +01006571 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02006572 .insns = {
6573 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6576 BPF_LD_MAP_FD(BPF_REG_1, 0),
6577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6578 BPF_FUNC_map_lookup_elem),
6579 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6580 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6581 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6582 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6583 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6584 BPF_MOV64_IMM(BPF_REG_0, 0),
6585 BPF_EXIT_INSN(),
6586 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6587 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6588 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6589 BPF_MOV64_IMM(BPF_REG_0, 0),
6590 BPF_EXIT_INSN(),
6591 },
6592 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006593 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006594 },
6595 {
Edward Creef65b1842017-08-07 15:27:12 +01006596 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02006597 .insns = {
6598 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6601 BPF_LD_MAP_FD(BPF_REG_1, 0),
6602 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6603 BPF_FUNC_map_lookup_elem),
6604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6605 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6606 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6607 BPF_MOV64_IMM(BPF_REG_2, 0),
6608 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6609 BPF_MOV64_IMM(BPF_REG_0, 0),
6610 BPF_EXIT_INSN(),
6611 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6612 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6613 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6614 BPF_MOV64_IMM(BPF_REG_0, 0),
6615 BPF_EXIT_INSN(),
6616 },
6617 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006618 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006619 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006620 },
6621 {
Edward Creef65b1842017-08-07 15:27:12 +01006622 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02006623 .insns = {
6624 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6627 BPF_LD_MAP_FD(BPF_REG_1, 0),
6628 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6629 BPF_FUNC_map_lookup_elem),
6630 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6631 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6632 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6633 BPF_MOV64_IMM(BPF_REG_2, -1),
6634 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6635 /* Dead branch. */
6636 BPF_MOV64_IMM(BPF_REG_0, 0),
6637 BPF_EXIT_INSN(),
6638 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6639 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6640 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6641 BPF_MOV64_IMM(BPF_REG_0, 0),
6642 BPF_EXIT_INSN(),
6643 },
6644 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006645 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006646 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006647 },
6648 {
Edward Creef65b1842017-08-07 15:27:12 +01006649 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02006650 .insns = {
6651 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6652 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6653 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6654 BPF_LD_MAP_FD(BPF_REG_1, 0),
6655 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6656 BPF_FUNC_map_lookup_elem),
6657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6658 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6659 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6660 BPF_MOV64_IMM(BPF_REG_2, -6),
6661 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6662 BPF_MOV64_IMM(BPF_REG_0, 0),
6663 BPF_EXIT_INSN(),
6664 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6665 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6666 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6667 BPF_MOV64_IMM(BPF_REG_0, 0),
6668 BPF_EXIT_INSN(),
6669 },
6670 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006671 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006672 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006673 },
6674 {
Edward Creef65b1842017-08-07 15:27:12 +01006675 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02006676 .insns = {
6677 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6680 BPF_LD_MAP_FD(BPF_REG_1, 0),
6681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6682 BPF_FUNC_map_lookup_elem),
6683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6684 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6685 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6686 BPF_MOV64_IMM(BPF_REG_2, 2),
6687 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6688 BPF_MOV64_IMM(BPF_REG_7, 1),
6689 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6690 BPF_MOV64_IMM(BPF_REG_0, 0),
6691 BPF_EXIT_INSN(),
6692 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6693 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6694 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6695 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6696 BPF_MOV64_IMM(BPF_REG_0, 0),
6697 BPF_EXIT_INSN(),
6698 },
6699 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006700 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006701 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006702 },
6703 {
Edward Creef65b1842017-08-07 15:27:12 +01006704 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02006705 .insns = {
6706 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6707 offsetof(struct __sk_buff, mark)),
6708 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6711 BPF_LD_MAP_FD(BPF_REG_1, 0),
6712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6713 BPF_FUNC_map_lookup_elem),
6714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6715 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6716 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6717 BPF_MOV64_IMM(BPF_REG_2, -1),
6718 BPF_MOV64_IMM(BPF_REG_8, 2),
6719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6720 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6721 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6722 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6723 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6724 BPF_MOV64_IMM(BPF_REG_0, 0),
6725 BPF_EXIT_INSN(),
6726 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6727 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6728 },
6729 .fixup_map1 = { 4 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006730 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006731 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006732 },
6733 {
Edward Creef65b1842017-08-07 15:27:12 +01006734 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02006735 .insns = {
6736 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6737 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6739 BPF_LD_MAP_FD(BPF_REG_1, 0),
6740 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6741 BPF_FUNC_map_lookup_elem),
6742 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6743 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6744 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6745 BPF_MOV64_IMM(BPF_REG_2, -6),
6746 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6747 BPF_MOV64_IMM(BPF_REG_0, 0),
6748 BPF_EXIT_INSN(),
6749 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6750 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6751 BPF_MOV64_IMM(BPF_REG_0, 0),
6752 BPF_EXIT_INSN(),
6753 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6754 BPF_MOV64_IMM(BPF_REG_0, 0),
6755 BPF_EXIT_INSN(),
6756 },
6757 .fixup_map1 = { 3 },
Jann Horn2255f8d2017-12-18 20:12:01 -08006758 .errstr = "unbounded min value",
Daniel Borkmann86412502017-07-21 00:00:25 +02006759 .result = REJECT,
6760 .result_unpriv = REJECT,
6761 },
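	/* Subtraction of two bounded unknowns can go negative, so the
	 * result must not be usable as a map value offset.
	 */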
Edward Cree545722c2017-07-21 14:36:57 +01006762 {
Edward Creef65b1842017-08-07 15:27:12 +01006763 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01006764 .insns = {
6765 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6768 BPF_LD_MAP_FD(BPF_REG_1, 0),
6769 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6770 BPF_FUNC_map_lookup_elem),
6771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6772 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6773 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6774 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6775 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6776 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6777 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6778 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6779 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6780 BPF_EXIT_INSN(),
6781 BPF_MOV64_IMM(BPF_REG_0, 0),
6782 BPF_EXIT_INSN(),
6783 },
6784 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006785 .errstr = "R0 max value is outside of the array range",
6786 .result = REJECT,
6787 },
6788 {
6789 "subtraction bounds (map value) variant 2",
6790 .insns = {
6791 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6792 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6794 BPF_LD_MAP_FD(BPF_REG_1, 0),
6795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6796 BPF_FUNC_map_lookup_elem),
6797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6798 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6799 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6800 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6801 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6802 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6803 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6804 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6805 BPF_EXIT_INSN(),
6806 BPF_MOV64_IMM(BPF_REG_0, 0),
6807 BPF_EXIT_INSN(),
6808 },
6809 .fixup_map1 = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01006810 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6811 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01006812 },
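	/* MOV32 zero-extends its result, so the subsequent right shift by
	 * 32 yields exactly 0 and the access is accepted; MOV64 with a
	 * negative immediate sign-extends, and the resulting huge offset
	 * must be rejected.
	 */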
Edward Cree69c4e8a2017-08-07 15:29:51 +01006813 {
Jann Horn2255f8d2017-12-18 20:12:01 -08006814 "bounds check based on zero-extended MOV",
6815 .insns = {
6816 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6819 BPF_LD_MAP_FD(BPF_REG_1, 0),
6820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6821 BPF_FUNC_map_lookup_elem),
6822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6823 /* r2 = 0x0000'0000'ffff'ffff */
6824 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6825 /* r2 = 0 */
6826 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6827 /* no-op */
6828 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6829 /* access at offset 0 */
6830 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6831 /* exit */
6832 BPF_MOV64_IMM(BPF_REG_0, 0),
6833 BPF_EXIT_INSN(),
6834 },
6835 .fixup_map1 = { 3 },
6836 .result = ACCEPT
6837 },
6838 {
6839 "bounds check based on sign-extended MOV. test1",
6840 .insns = {
6841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6844 BPF_LD_MAP_FD(BPF_REG_1, 0),
6845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6846 BPF_FUNC_map_lookup_elem),
6847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6848 /* r2 = 0xffff'ffff'ffff'ffff */
6849 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6850 /* r2 = 0xffff'ffff */
6851 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6852 /* r0 = <oob pointer> */
6853 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6854 /* access to OOB pointer */
6855 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6856 /* exit */
6857 BPF_MOV64_IMM(BPF_REG_0, 0),
6858 BPF_EXIT_INSN(),
6859 },
6860 .fixup_map1 = { 3 },
6861 .errstr = "map_value pointer and 4294967295",
6862 .result = REJECT
6863 },
6864 {
6865 "bounds check based on sign-extended MOV. test2",
6866 .insns = {
6867 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6870 BPF_LD_MAP_FD(BPF_REG_1, 0),
6871 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6872 BPF_FUNC_map_lookup_elem),
6873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6874 /* r2 = 0xffff'ffff'ffff'ffff */
6875 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6876 /* r2 = 0xfff'ffff */
6877 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6878 /* r0 = <oob pointer> */
6879 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6880 /* access to OOB pointer */
6881 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6882 /* exit */
6883 BPF_MOV64_IMM(BPF_REG_0, 0),
6884 BPF_EXIT_INSN(),
6885 },
6886 .fixup_map1 = { 3 },
6887 .errstr = "R0 min value is outside of the array range",
6888 .result = REJECT
6889 },
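	/* The known variable offset, the constant adds and the load's own
	 * instruction offset must be accounted for together; their sum
	 * overflows the allowed map value range here.
	 */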
6890 {
6891 "bounds check based on reg_off + var_off + insn_off. test1",
6892 .insns = {
6893 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6894 offsetof(struct __sk_buff, mark)),
6895 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6898 BPF_LD_MAP_FD(BPF_REG_1, 0),
6899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6900 BPF_FUNC_map_lookup_elem),
6901 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6902 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6904 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6906 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6907 BPF_MOV64_IMM(BPF_REG_0, 0),
6908 BPF_EXIT_INSN(),
6909 },
6910 .fixup_map1 = { 4 },
6911 .errstr = "value_size=8 off=1073741825",
6912 .result = REJECT,
6913 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6914 },
6915 {
6916 "bounds check based on reg_off + var_off + insn_off. test2",
6917 .insns = {
6918 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6919 offsetof(struct __sk_buff, mark)),
6920 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6921 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6923 BPF_LD_MAP_FD(BPF_REG_1, 0),
6924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6925 BPF_FUNC_map_lookup_elem),
6926 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6927 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6929 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6931 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6932 BPF_MOV64_IMM(BPF_REG_0, 0),
6933 BPF_EXIT_INSN(),
6934 },
6935 .fixup_map1 = { 4 },
6936 .errstr = "value 1073741823",
6937 .result = REJECT,
6938 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6939 },
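	/* Truncation via 32-bit ALU ops: if the tracked 64-bit range does
	 * not cross a 32-bit boundary the bounds survive (accepted);
	 * otherwise the value becomes effectively unbounded and the access
	 * is rejected. Also covers wrapping 32-bit addition, oversized
	 * shift counts and right shifts of possibly negative values.
	 */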
6940 {
6941 "bounds check after truncation of non-boundary-crossing range",
6942 .insns = {
6943 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6946 BPF_LD_MAP_FD(BPF_REG_1, 0),
6947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6948 BPF_FUNC_map_lookup_elem),
6949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6950 /* r1 = [0x00, 0xff] */
6951 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6952 BPF_MOV64_IMM(BPF_REG_2, 1),
6953 /* r2 = 0x10'0000'0000 */
6954 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6955 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6956 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6957 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6959 /* r1 = [0x00, 0xff] */
6960 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6961 /* r1 = 0 */
6962 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6963 /* no-op */
6964 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6965 /* access at offset 0 */
6966 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6967 /* exit */
6968 BPF_MOV64_IMM(BPF_REG_0, 0),
6969 BPF_EXIT_INSN(),
6970 },
6971 .fixup_map1 = { 3 },
6972 .result = ACCEPT
6973 },
6974 {
6975 "bounds check after truncation of boundary-crossing range (1)",
6976 .insns = {
6977 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6978 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6979 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6980 BPF_LD_MAP_FD(BPF_REG_1, 0),
6981 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6982 BPF_FUNC_map_lookup_elem),
6983 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6984 /* r1 = [0x00, 0xff] */
6985 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6986 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6987 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6989 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6990 * [0x0000'0000, 0x0000'007f]
6991 */
6992 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6993 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6994 /* r1 = [0x00, 0xff] or
6995 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6996 */
6997 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6998 /* r1 = 0 or
6999 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7000 */
7001 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7002 /* no-op or OOB pointer computation */
7003 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7004 /* potentially OOB access */
7005 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7006 /* exit */
7007 BPF_MOV64_IMM(BPF_REG_0, 0),
7008 BPF_EXIT_INSN(),
7009 },
7010 .fixup_map1 = { 3 },
7011 /* not actually fully unbounded, but the bound is very high */
7012 .errstr = "R0 unbounded memory access",
7013 .result = REJECT
7014 },
7015 {
7016 "bounds check after truncation of boundary-crossing range (2)",
7017 .insns = {
7018 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7019 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7020 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7021 BPF_LD_MAP_FD(BPF_REG_1, 0),
7022 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7023 BPF_FUNC_map_lookup_elem),
7024 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7025 /* r1 = [0x00, 0xff] */
7026 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7028 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
7029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
7030 /* r1 = [0xffff'ff80, 0xffff'ffff] or
7031 * [0x0000'0000, 0x0000'007f]
7032 * difference to previous test: truncation via MOV32
7033 * instead of ALU32.
7034 */
7035 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
7036 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7037 /* r1 = [0x00, 0xff] or
7038 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
7039 */
7040 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
7041 /* r1 = 0 or
7042 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
7043 */
7044 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7045 /* no-op or OOB pointer computation */
7046 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7047 /* potentially OOB access */
7048 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7049 /* exit */
7050 BPF_MOV64_IMM(BPF_REG_0, 0),
7051 BPF_EXIT_INSN(),
7052 },
7053 .fixup_map1 = { 3 },
7054 /* not actually fully unbounded, but the bound is very high */
7055 .errstr = "R0 unbounded memory access",
7056 .result = REJECT
7057 },
7058 {
7059 "bounds check after wrapping 32-bit addition",
7060 .insns = {
7061 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7062 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7064 BPF_LD_MAP_FD(BPF_REG_1, 0),
7065 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7066 BPF_FUNC_map_lookup_elem),
7067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7068 /* r1 = 0x7fff'ffff */
7069 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7070 /* r1 = 0xffff'fffe */
7071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7072 /* r1 = 0 */
7073 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7074 /* no-op */
7075 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7076 /* access at offset 0 */
7077 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7078 /* exit */
7079 BPF_MOV64_IMM(BPF_REG_0, 0),
7080 BPF_EXIT_INSN(),
7081 },
7082 .fixup_map1 = { 3 },
7083 .result = ACCEPT
7084 },
7085 {
7086 "bounds check after shift with oversized count operand",
7087 .insns = {
7088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7091 BPF_LD_MAP_FD(BPF_REG_1, 0),
7092 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7093 BPF_FUNC_map_lookup_elem),
7094 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7095 BPF_MOV64_IMM(BPF_REG_2, 32),
7096 BPF_MOV64_IMM(BPF_REG_1, 1),
7097 /* r1 = (u32)1 << (u32)32 = ? */
7098 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7099 /* r1 = [0x0000, 0xffff] */
7100 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7101 /* computes unknown pointer, potentially OOB */
7102 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7103 /* potentially OOB access */
7104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7105 /* exit */
7106 BPF_MOV64_IMM(BPF_REG_0, 0),
7107 BPF_EXIT_INSN(),
7108 },
7109 .fixup_map1 = { 3 },
7110 .errstr = "R0 max value is outside of the array range",
7111 .result = REJECT
7112 },
7113 {
7114 "bounds check after right shift of maybe-negative number",
7115 .insns = {
7116 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7117 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7118 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7119 BPF_LD_MAP_FD(BPF_REG_1, 0),
7120 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7121 BPF_FUNC_map_lookup_elem),
7122 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7123 /* r1 = [0x00, 0xff] */
7124 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7125 /* r1 = [-0x01, 0xfe] */
7126 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7127 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7128 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7129 /* r1 = 0 or 0xffff'ffff'ffff */
7130 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7131 /* computes unknown pointer, potentially OOB */
7132 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7133 /* potentially OOB access */
7134 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7135 /* exit */
7136 BPF_MOV64_IMM(BPF_REG_0, 0),
7137 BPF_EXIT_INSN(),
7138 },
7139 .fixup_map1 = { 3 },
7140 .errstr = "R0 unbounded memory access",
7141 .result = REJECT
7142 },
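	/* Large constant or multiplied offsets added to a map value
	 * pointer must be rejected before the off+size computation can
	 * overflow signed 32 bit.
	 */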
7143 {
7144 "bounds check map access with off+size signed 32bit overflow. test1",
7145 .insns = {
7146 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7147 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7148 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7149 BPF_LD_MAP_FD(BPF_REG_1, 0),
7150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7151 BPF_FUNC_map_lookup_elem),
7152 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7153 BPF_EXIT_INSN(),
7154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7155 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7156 BPF_JMP_A(0),
7157 BPF_EXIT_INSN(),
7158 },
7159 .fixup_map1 = { 3 },
7160 .errstr = "map_value pointer and 2147483646",
7161 .result = REJECT
7162 },
7163 {
7164 "bounds check map access with off+size signed 32bit overflow. test2",
7165 .insns = {
7166 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7167 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7169 BPF_LD_MAP_FD(BPF_REG_1, 0),
7170 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7171 BPF_FUNC_map_lookup_elem),
7172 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7173 BPF_EXIT_INSN(),
7174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7177 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7178 BPF_JMP_A(0),
7179 BPF_EXIT_INSN(),
7180 },
7181 .fixup_map1 = { 3 },
7182 .errstr = "pointer offset 1073741822",
7183 .result = REJECT
7184 },
7185 {
7186 "bounds check map access with off+size signed 32bit overflow. test3",
7187 .insns = {
7188 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7189 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7191 BPF_LD_MAP_FD(BPF_REG_1, 0),
7192 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7193 BPF_FUNC_map_lookup_elem),
7194 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7195 BPF_EXIT_INSN(),
7196 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7197 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7198 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7199 BPF_JMP_A(0),
7200 BPF_EXIT_INSN(),
7201 },
7202 .fixup_map1 = { 3 },
7203 .errstr = "pointer offset -1073741822",
7204 .result = REJECT
7205 },
7206 {
7207 "bounds check map access with off+size signed 32bit overflow. test4",
7208 .insns = {
7209 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7210 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7212 BPF_LD_MAP_FD(BPF_REG_1, 0),
7213 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7214 BPF_FUNC_map_lookup_elem),
7215 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7216 BPF_EXIT_INSN(),
7217 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7218 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7219 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7220 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7221 BPF_JMP_A(0),
7222 BPF_EXIT_INSN(),
7223 },
7224 .fixup_map1 = { 3 },
7225 .errstr = "map_value pointer and 1000000000000",
7226 .result = REJECT
7227 },
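	/* Two paths leave R0 holding either a scalar loaded from the map
	 * value or the frame pointer; state pruning must not treat the two
	 * as equivalent. Privileged loads are accepted and return the
	 * pointer, unprivileged ones must reject the address leak.
	 */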
7228 {
7229 "pointer/scalar confusion in state equality check (way 1)",
7230 .insns = {
7231 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7234 BPF_LD_MAP_FD(BPF_REG_1, 0),
7235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7236 BPF_FUNC_map_lookup_elem),
7237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7238 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7239 BPF_JMP_A(1),
7240 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7241 BPF_JMP_A(0),
7242 BPF_EXIT_INSN(),
7243 },
7244 .fixup_map1 = { 3 },
7245 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007246 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08007247 .result_unpriv = REJECT,
7248 .errstr_unpriv = "R0 leaks addr as return value"
7249 },
7250 {
7251 "pointer/scalar confusion in state equality check (way 2)",
7252 .insns = {
7253 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7254 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7255 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7256 BPF_LD_MAP_FD(BPF_REG_1, 0),
7257 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7258 BPF_FUNC_map_lookup_elem),
7259 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7260 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7261 BPF_JMP_A(1),
7262 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7263 BPF_EXIT_INSN(),
7264 },
7265 .fixup_map1 = { 3 },
7266 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08007267 .retval = POINTER_VALUE,
Jann Horn2255f8d2017-12-18 20:12:01 -08007268 .result_unpriv = REJECT,
7269 .errstr_unpriv = "R0 leaks addr as return value"
7270 },
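	/* Pointers with a variable offset may not be used to access the
	 * context or the stack, neither directly nor indirectly through a
	 * helper argument.
	 */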
7271 {
Edward Cree69c4e8a2017-08-07 15:29:51 +01007272 "variable-offset ctx access",
7273 .insns = {
7274 /* Get an unknown value */
7275 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7276 /* Make it small and 4-byte aligned */
7277 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7278 /* add it to skb. We now have either &skb->len or
7279 * &skb->pkt_type, but we don't know which
7280 */
7281 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7282 /* dereference it */
7283 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7284 BPF_EXIT_INSN(),
7285 },
7286 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7287 .result = REJECT,
7288 .prog_type = BPF_PROG_TYPE_LWT_IN,
7289 },
7290 {
7291 "variable-offset stack access",
7292 .insns = {
7293 /* Fill the top 8 bytes of the stack */
7294 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7295 /* Get an unknown value */
7296 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7297 /* Make it small and 4-byte aligned */
7298 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7299 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7300 /* add it to fp. We now have either fp-4 or fp-8, but
7301 * we don't know which
7302 */
7303 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7304 /* dereference it */
7305 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7306 BPF_EXIT_INSN(),
7307 },
7308 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7309 .result = REJECT,
7310 .prog_type = BPF_PROG_TYPE_LWT_IN,
7311 },
Edward Creed893dc22017-08-23 15:09:46 +01007312 {
Jann Horn2255f8d2017-12-18 20:12:01 -08007313 "indirect variable-offset stack access",
7314 .insns = {
7315 /* Fill the top 8 bytes of the stack */
7316 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7317 /* Get an unknown value */
7318 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7319 /* Make it small and 4-byte aligned */
7320 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7321 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7322 /* add it to fp. We now have either fp-4 or fp-8, but
7323 * we don't know which
7324 */
7325 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7326 /* dereference it indirectly */
7327 BPF_LD_MAP_FD(BPF_REG_1, 0),
7328 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7329 BPF_FUNC_map_lookup_elem),
7330 BPF_MOV64_IMM(BPF_REG_0, 0),
7331 BPF_EXIT_INSN(),
7332 },
7333 .fixup_map1 = { 5 },
7334 .errstr = "variable stack read R2",
7335 .result = REJECT,
7336 .prog_type = BPF_PROG_TYPE_LWT_IN,
7337 },
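	/* Adding large constants to the frame pointer must be caught
	 * before 32-bit wraparound could bring the offset back in range.
	 */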
7338 {
7339 "direct stack access with 32-bit wraparound. test1",
7340 .insns = {
7341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7343 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7344 BPF_MOV32_IMM(BPF_REG_0, 0),
7345 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7346 BPF_EXIT_INSN()
7347 },
7348 .errstr = "fp pointer and 2147483647",
7349 .result = REJECT
7350 },
7351 {
7352 "direct stack access with 32-bit wraparound. test2",
7353 .insns = {
7354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7357 BPF_MOV32_IMM(BPF_REG_0, 0),
7358 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7359 BPF_EXIT_INSN()
7360 },
7361 .errstr = "fp pointer and 1073741823",
7362 .result = REJECT
7363 },
7364 {
7365 "direct stack access with 32-bit wraparound. test3",
7366 .insns = {
7367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7370 BPF_MOV32_IMM(BPF_REG_0, 0),
7371 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7372 BPF_EXIT_INSN()
7373 },
7374 .errstr = "fp pointer offset 1073741822",
7375 .result = REJECT
7376 },
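	/* Both conditional branches below can skip the writes to R0, so
	 * the final read of R0 at exit must fail; the write on the
	 * fall-through path must not screen the missing write on the
	 * other path.
	 */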
7377 {
Edward Creed893dc22017-08-23 15:09:46 +01007378 "liveness pruning and write screening",
7379 .insns = {
7380 /* Get an unknown value */
7381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7382 /* branch conditions teach us nothing about R2 */
7383 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7384 BPF_MOV64_IMM(BPF_REG_0, 0),
7385 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7386 BPF_MOV64_IMM(BPF_REG_0, 0),
7387 BPF_EXIT_INSN(),
7388 },
7389 .errstr = "R0 !read_ok",
7390 .result = REJECT,
7391 .prog_type = BPF_PROG_TYPE_LWT_IN,
7392 },
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01007393 {
7394 "varlen_map_value_access pruning",
7395 .insns = {
7396 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7397 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7399 BPF_LD_MAP_FD(BPF_REG_1, 0),
7400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7401 BPF_FUNC_map_lookup_elem),
7402 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7403 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7404 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7405 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7406 BPF_MOV32_IMM(BPF_REG_1, 0),
7407 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7408 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7409 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7410 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7411 offsetof(struct test_val, foo)),
7412 BPF_EXIT_INSN(),
7413 },
7414 .fixup_map2 = { 3 },
7415 .errstr_unpriv = "R0 leaks addr",
7416 .errstr = "R0 unbounded memory access",
7417 .result_unpriv = REJECT,
7418 .result = REJECT,
7419 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7420 },
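	/* Byte swap (BPF_END) is only defined for the 32-bit BPF_ALU
	 * class; the hand-built BPF_ALU64 | BPF_END insn below uses
	 * reserved fields and must be rejected.
	 */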
Edward Creee67b8a62017-09-15 14:37:38 +01007421 {
7422 "invalid 64-bit BPF_END",
7423 .insns = {
7424 BPF_MOV32_IMM(BPF_REG_0, 0),
7425 {
7426 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7427 .dst_reg = BPF_REG_0,
7428 .src_reg = 0,
7429 .off = 0,
7430 .imm = 32,
7431 },
7432 BPF_EXIT_INSN(),
7433 },
7434 .errstr = "BPF_END uses reserved fields",
7435 .result = REJECT,
7436 },
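	/* XDP data_meta sits in front of the packet: meta accesses must be
	 * bounded against data rather than data_end, and pointers derived
	 * before bpf_xdp_adjust_meta() are invalidated by the call.
	 */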
Daniel Borkmann22c88522017-09-25 02:25:53 +02007437 {
7438 "meta access, test1",
7439 .insns = {
7440 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7441 offsetof(struct xdp_md, data_meta)),
7442 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7443 offsetof(struct xdp_md, data)),
7444 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7446 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7447 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7448 BPF_MOV64_IMM(BPF_REG_0, 0),
7449 BPF_EXIT_INSN(),
7450 },
7451 .result = ACCEPT,
7452 .prog_type = BPF_PROG_TYPE_XDP,
7453 },
7454 {
7455 "meta access, test2",
7456 .insns = {
7457 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7458 offsetof(struct xdp_md, data_meta)),
7459 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7460 offsetof(struct xdp_md, data)),
7461 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7462 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7463 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7465 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7466 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7467 BPF_MOV64_IMM(BPF_REG_0, 0),
7468 BPF_EXIT_INSN(),
7469 },
7470 .result = REJECT,
7471 .errstr = "invalid access to packet, off=-8",
7472 .prog_type = BPF_PROG_TYPE_XDP,
7473 },
7474 {
7475 "meta access, test3",
7476 .insns = {
7477 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7478 offsetof(struct xdp_md, data_meta)),
7479 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7480 offsetof(struct xdp_md, data_end)),
7481 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7483 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7484 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7485 BPF_MOV64_IMM(BPF_REG_0, 0),
7486 BPF_EXIT_INSN(),
7487 },
7488 .result = REJECT,
7489 .errstr = "invalid access to packet",
7490 .prog_type = BPF_PROG_TYPE_XDP,
7491 },
7492 {
7493 "meta access, test4",
7494 .insns = {
7495 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7496 offsetof(struct xdp_md, data_meta)),
7497 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7498 offsetof(struct xdp_md, data_end)),
7499 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7500 offsetof(struct xdp_md, data)),
7501 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7503 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7504 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7505 BPF_MOV64_IMM(BPF_REG_0, 0),
7506 BPF_EXIT_INSN(),
7507 },
7508 .result = REJECT,
7509 .errstr = "invalid access to packet",
7510 .prog_type = BPF_PROG_TYPE_XDP,
7511 },
7512 {
7513 "meta access, test5",
7514 .insns = {
7515 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7516 offsetof(struct xdp_md, data_meta)),
7517 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7518 offsetof(struct xdp_md, data)),
7519 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7521 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7522 BPF_MOV64_IMM(BPF_REG_2, -8),
7523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7524 BPF_FUNC_xdp_adjust_meta),
7525 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7526 BPF_MOV64_IMM(BPF_REG_0, 0),
7527 BPF_EXIT_INSN(),
7528 },
7529 .result = REJECT,
7530 .errstr = "R3 !read_ok",
7531 .prog_type = BPF_PROG_TYPE_XDP,
7532 },
7533 {
7534 "meta access, test6",
7535 .insns = {
7536 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7537 offsetof(struct xdp_md, data_meta)),
7538 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7539 offsetof(struct xdp_md, data)),
7540 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7542 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7543 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7544 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7545 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7546 BPF_MOV64_IMM(BPF_REG_0, 0),
7547 BPF_EXIT_INSN(),
7548 },
7549 .result = REJECT,
7550 .errstr = "invalid access to packet",
7551 .prog_type = BPF_PROG_TYPE_XDP,
7552 },
7553 {
7554 "meta access, test7",
7555 .insns = {
7556 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7557 offsetof(struct xdp_md, data_meta)),
7558 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7559 offsetof(struct xdp_md, data)),
7560 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7561 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7562 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7564 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7565 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7566 BPF_MOV64_IMM(BPF_REG_0, 0),
7567 BPF_EXIT_INSN(),
7568 },
7569 .result = ACCEPT,
7570 .prog_type = BPF_PROG_TYPE_XDP,
7571 },
7572 {
7573 "meta access, test8",
7574 .insns = {
7575 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7576 offsetof(struct xdp_md, data_meta)),
7577 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7578 offsetof(struct xdp_md, data)),
7579 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7580 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7581 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7582 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7583 BPF_MOV64_IMM(BPF_REG_0, 0),
7584 BPF_EXIT_INSN(),
7585 },
7586 .result = ACCEPT,
7587 .prog_type = BPF_PROG_TYPE_XDP,
7588 },
7589 {
7590 "meta access, test9",
7591 .insns = {
7592 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7593 offsetof(struct xdp_md, data_meta)),
7594 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7595 offsetof(struct xdp_md, data)),
7596 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7599 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7600 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7601 BPF_MOV64_IMM(BPF_REG_0, 0),
7602 BPF_EXIT_INSN(),
7603 },
7604 .result = REJECT,
7605 .errstr = "invalid access to packet",
7606 .prog_type = BPF_PROG_TYPE_XDP,
7607 },
7608 {
7609 "meta access, test10",
7610 .insns = {
7611 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7612 offsetof(struct xdp_md, data_meta)),
7613 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7614 offsetof(struct xdp_md, data)),
7615 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7616 offsetof(struct xdp_md, data_end)),
7617 BPF_MOV64_IMM(BPF_REG_5, 42),
7618 BPF_MOV64_IMM(BPF_REG_6, 24),
7619 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7620 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7621 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7622 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7623 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
7624 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7625 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7626 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7627 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
7628 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
7629 BPF_MOV64_IMM(BPF_REG_0, 0),
7630 BPF_EXIT_INSN(),
7631 },
7632 .result = REJECT,
7633 .errstr = "invalid access to packet",
7634 .prog_type = BPF_PROG_TYPE_XDP,
7635 },
7636 {
7637 "meta access, test11",
7638 .insns = {
7639 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7640 offsetof(struct xdp_md, data_meta)),
7641 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7642 offsetof(struct xdp_md, data)),
7643 BPF_MOV64_IMM(BPF_REG_5, 42),
7644 BPF_MOV64_IMM(BPF_REG_6, 24),
7645 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7646 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7647 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7648 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7649 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
7650 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7651 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7652 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7653 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
7654 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
7655 BPF_MOV64_IMM(BPF_REG_0, 0),
7656 BPF_EXIT_INSN(),
7657 },
7658 .result = ACCEPT,
7659 .prog_type = BPF_PROG_TYPE_XDP,
7660 },
7661 {
7662 "meta access, test12",
7663 .insns = {
7664 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7665 offsetof(struct xdp_md, data_meta)),
7666 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7667 offsetof(struct xdp_md, data)),
7668 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7669 offsetof(struct xdp_md, data_end)),
7670 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7672 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
7673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7674 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7675 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7676 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
7677 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7678 BPF_MOV64_IMM(BPF_REG_0, 0),
7679 BPF_EXIT_INSN(),
7680 },
7681 .result = ACCEPT,
7682 .prog_type = BPF_PROG_TYPE_XDP,
7683 },
7684 {
7685 "arithmetic ops make PTR_TO_CTX unusable",
7686 .insns = {
7687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
7688 offsetof(struct __sk_buff, data) -
7689 offsetof(struct __sk_buff, mark)),
7690 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7691 offsetof(struct __sk_buff, mark)),
7692 BPF_EXIT_INSN(),
7693 },
7694 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
7695 .result = REJECT,
7696 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7697 },
7698 {
7699 "pkt_end - pkt_start is allowed",
7700 .insns = {
7701 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7702 offsetof(struct __sk_buff, data_end)),
7703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7704 offsetof(struct __sk_buff, data)),
7705 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7706 BPF_EXIT_INSN(),
7707 },
7708 .result = ACCEPT,
7709 .retval = TEST_DATA_LEN,
7710 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7711 },
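	/* pkt_end itself must stay immutable: adding to or subtracting from
	 * the data_end pointer is rejected as pointer arithmetic on
	 * PTR_TO_PACKET_END, regardless of the bounds check that follows.
	 */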
7712 {
7713 "XDP pkt read, pkt_end mangling, bad access 1",
7714 .insns = {
7715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7716 offsetof(struct xdp_md, data)),
7717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7718 offsetof(struct xdp_md, data_end)),
7719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
7722 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7723 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7724 BPF_MOV64_IMM(BPF_REG_0, 0),
7725 BPF_EXIT_INSN(),
7726 },
7727 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7728 .result = REJECT,
7729 .prog_type = BPF_PROG_TYPE_XDP,
7730 },
7731 {
7732 "XDP pkt read, pkt_end mangling, bad access 2",
7733 .insns = {
7734 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7735 offsetof(struct xdp_md, data)),
7736 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7737 offsetof(struct xdp_md, data_end)),
7738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7739 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7740 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
7741 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7742 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7743 BPF_MOV64_IMM(BPF_REG_0, 0),
7744 BPF_EXIT_INSN(),
7745 },
7746 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7747 .result = REJECT,
7748 .prog_type = BPF_PROG_TYPE_XDP,
7749 },
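	/* The block below repeats one pattern for every comparison
	 * direction (JGT, JLT, JGE, JLE, with the packet pointer on either
	 * side): the "good access" reads only what the branch proves to be
	 * in range, while the "bad access" variants read more than was
	 * proven or read on the branch where nothing was proven.
	 */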
7750 {
7751 "XDP pkt read, pkt_data' > pkt_end, good access",
7752 .insns = {
7753 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7754 offsetof(struct xdp_md, data)),
7755 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7756 offsetof(struct xdp_md, data_end)),
7757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7759 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7760 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7761 BPF_MOV64_IMM(BPF_REG_0, 0),
7762 BPF_EXIT_INSN(),
7763 },
7764 .result = ACCEPT,
7765 .prog_type = BPF_PROG_TYPE_XDP,
7766 },
7767 {
7768 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7769 .insns = {
7770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7771 offsetof(struct xdp_md, data)),
7772 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7773 offsetof(struct xdp_md, data_end)),
7774 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7776 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7777 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7778 BPF_MOV64_IMM(BPF_REG_0, 0),
7779 BPF_EXIT_INSN(),
7780 },
7781 .errstr = "R1 offset is outside of the packet",
7782 .result = REJECT,
7783 .prog_type = BPF_PROG_TYPE_XDP,
7784 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7785 },
7786 {
7787 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7788 .insns = {
7789 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7790 offsetof(struct xdp_md, data)),
7791 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7792 offsetof(struct xdp_md, data_end)),
7793 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7795 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7796 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7797 BPF_MOV64_IMM(BPF_REG_0, 0),
7798 BPF_EXIT_INSN(),
7799 },
7800 .errstr = "R1 offset is outside of the packet",
7801 .result = REJECT,
7802 .prog_type = BPF_PROG_TYPE_XDP,
7803 },
7804 {
7805 "XDP pkt read, pkt_end > pkt_data', good access",
7806 .insns = {
7807 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7808 offsetof(struct xdp_md, data)),
7809 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7810 offsetof(struct xdp_md, data_end)),
7811 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7813 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7814 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7815 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7816 BPF_MOV64_IMM(BPF_REG_0, 0),
7817 BPF_EXIT_INSN(),
7818 },
7819 .result = ACCEPT,
7820 .prog_type = BPF_PROG_TYPE_XDP,
7821 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7822 },
7823 {
7824 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7825 .insns = {
7826 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7827 offsetof(struct xdp_md, data)),
7828 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7829 offsetof(struct xdp_md, data_end)),
7830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7832 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7833 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7834 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7835 BPF_MOV64_IMM(BPF_REG_0, 0),
7836 BPF_EXIT_INSN(),
7837 },
7838 .errstr = "R1 offset is outside of the packet",
7839 .result = REJECT,
7840 .prog_type = BPF_PROG_TYPE_XDP,
7841 },
7842 {
7843 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7844 .insns = {
7845 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7846 offsetof(struct xdp_md, data)),
7847 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7848 offsetof(struct xdp_md, data_end)),
7849 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7851 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7852 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7853 BPF_MOV64_IMM(BPF_REG_0, 0),
7854 BPF_EXIT_INSN(),
7855 },
7856 .errstr = "R1 offset is outside of the packet",
7857 .result = REJECT,
7858 .prog_type = BPF_PROG_TYPE_XDP,
7859 },
7860 {
7861 "XDP pkt read, pkt_data' < pkt_end, good access",
7862 .insns = {
7863 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7864 offsetof(struct xdp_md, data)),
7865 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7866 offsetof(struct xdp_md, data_end)),
7867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7869 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7870 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7871 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7872 BPF_MOV64_IMM(BPF_REG_0, 0),
7873 BPF_EXIT_INSN(),
7874 },
7875 .result = ACCEPT,
7876 .prog_type = BPF_PROG_TYPE_XDP,
7877 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7878 },
7879 {
7880 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7881 .insns = {
7882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7883 offsetof(struct xdp_md, data)),
7884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7885 offsetof(struct xdp_md, data_end)),
7886 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7888 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7889 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7890 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7891 BPF_MOV64_IMM(BPF_REG_0, 0),
7892 BPF_EXIT_INSN(),
7893 },
7894 .errstr = "R1 offset is outside of the packet",
7895 .result = REJECT,
7896 .prog_type = BPF_PROG_TYPE_XDP,
7897 },
7898 {
7899 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7900 .insns = {
7901 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7902 offsetof(struct xdp_md, data)),
7903 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7904 offsetof(struct xdp_md, data_end)),
7905 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7907 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7908 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7909 BPF_MOV64_IMM(BPF_REG_0, 0),
7910 BPF_EXIT_INSN(),
7911 },
7912 .errstr = "R1 offset is outside of the packet",
7913 .result = REJECT,
7914 .prog_type = BPF_PROG_TYPE_XDP,
7915 },
7916 {
7917 "XDP pkt read, pkt_end < pkt_data', good access",
7918 .insns = {
7919 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7920 offsetof(struct xdp_md, data)),
7921 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7922 offsetof(struct xdp_md, data_end)),
7923 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7925 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7926 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7927 BPF_MOV64_IMM(BPF_REG_0, 0),
7928 BPF_EXIT_INSN(),
7929 },
7930 .result = ACCEPT,
7931 .prog_type = BPF_PROG_TYPE_XDP,
7932 },
7933 {
7934 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7935 .insns = {
7936 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7937 offsetof(struct xdp_md, data)),
7938 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7939 offsetof(struct xdp_md, data_end)),
7940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7941 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7942 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7943 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7944 BPF_MOV64_IMM(BPF_REG_0, 0),
7945 BPF_EXIT_INSN(),
7946 },
7947 .errstr = "R1 offset is outside of the packet",
7948 .result = REJECT,
7949 .prog_type = BPF_PROG_TYPE_XDP,
7950 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7951 },
7952 {
7953 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7954 .insns = {
7955 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7956 offsetof(struct xdp_md, data)),
7957 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7958 offsetof(struct xdp_md, data_end)),
7959 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7961 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7962 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7963 BPF_MOV64_IMM(BPF_REG_0, 0),
7964 BPF_EXIT_INSN(),
7965 },
7966 .errstr = "R1 offset is outside of the packet",
7967 .result = REJECT,
7968 .prog_type = BPF_PROG_TYPE_XDP,
7969 },
7970 {
7971 "XDP pkt read, pkt_data' >= pkt_end, good access",
7972 .insns = {
7973 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7974 offsetof(struct xdp_md, data)),
7975 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7976 offsetof(struct xdp_md, data_end)),
7977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7979 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7980 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7981 BPF_MOV64_IMM(BPF_REG_0, 0),
7982 BPF_EXIT_INSN(),
7983 },
7984 .result = ACCEPT,
7985 .prog_type = BPF_PROG_TYPE_XDP,
7986 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7987 },
7988 {
7989 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7990 .insns = {
7991 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7992 offsetof(struct xdp_md, data)),
7993 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7994 offsetof(struct xdp_md, data_end)),
7995 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7997 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7999 BPF_MOV64_IMM(BPF_REG_0, 0),
8000 BPF_EXIT_INSN(),
8001 },
8002 .errstr = "R1 offset is outside of the packet",
8003 .result = REJECT,
8004 .prog_type = BPF_PROG_TYPE_XDP,
8005 },
8006 {
8007 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
8008 .insns = {
8009 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8010 offsetof(struct xdp_md, data)),
8011 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8012 offsetof(struct xdp_md, data_end)),
8013 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8015 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8016 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8017 BPF_MOV64_IMM(BPF_REG_0, 0),
8018 BPF_EXIT_INSN(),
8019 },
8020 .errstr = "R1 offset is outside of the packet",
8021 .result = REJECT,
8022 .prog_type = BPF_PROG_TYPE_XDP,
8023 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8024 },
8025 {
8026 "XDP pkt read, pkt_end >= pkt_data', good access",
8027 .insns = {
8028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8029 offsetof(struct xdp_md, data)),
8030 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8031 offsetof(struct xdp_md, data_end)),
8032 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8034 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8035 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8036 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8037 BPF_MOV64_IMM(BPF_REG_0, 0),
8038 BPF_EXIT_INSN(),
8039 },
8040 .result = ACCEPT,
8041 .prog_type = BPF_PROG_TYPE_XDP,
8042 },
8043 {
8044 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
8045 .insns = {
8046 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8047 offsetof(struct xdp_md, data)),
8048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8049 offsetof(struct xdp_md, data_end)),
8050 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8052 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8053 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8055 BPF_MOV64_IMM(BPF_REG_0, 0),
8056 BPF_EXIT_INSN(),
8057 },
8058 .errstr = "R1 offset is outside of the packet",
8059 .result = REJECT,
8060 .prog_type = BPF_PROG_TYPE_XDP,
8061 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8062 },
8063 {
8064 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
8065 .insns = {
8066 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8067 offsetof(struct xdp_md, data)),
8068 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8069 offsetof(struct xdp_md, data_end)),
8070 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8072 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8073 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8074 BPF_MOV64_IMM(BPF_REG_0, 0),
8075 BPF_EXIT_INSN(),
8076 },
8077 .errstr = "R1 offset is outside of the packet",
8078 .result = REJECT,
8079 .prog_type = BPF_PROG_TYPE_XDP,
8080 },
8081 {
8082 "XDP pkt read, pkt_data' <= pkt_end, good access",
8083 .insns = {
8084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8085 offsetof(struct xdp_md, data)),
8086 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8087 offsetof(struct xdp_md, data_end)),
8088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8090 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8091 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8092 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8093 BPF_MOV64_IMM(BPF_REG_0, 0),
8094 BPF_EXIT_INSN(),
8095 },
8096 .result = ACCEPT,
8097 .prog_type = BPF_PROG_TYPE_XDP,
8098 },
8099 {
8100 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
8101 .insns = {
8102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8103 offsetof(struct xdp_md, data)),
8104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8105 offsetof(struct xdp_md, data_end)),
8106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8108 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8109 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8110 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8111 BPF_MOV64_IMM(BPF_REG_0, 0),
8112 BPF_EXIT_INSN(),
8113 },
8114 .errstr = "R1 offset is outside of the packet",
8115 .result = REJECT,
8116 .prog_type = BPF_PROG_TYPE_XDP,
8117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8118 },
8119 {
8120 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
8121 .insns = {
8122 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8123 offsetof(struct xdp_md, data)),
8124 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8125 offsetof(struct xdp_md, data_end)),
8126 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8128 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8129 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8130 BPF_MOV64_IMM(BPF_REG_0, 0),
8131 BPF_EXIT_INSN(),
8132 },
8133 .errstr = "R1 offset is outside of the packet",
8134 .result = REJECT,
8135 .prog_type = BPF_PROG_TYPE_XDP,
8136 },
8137 {
8138 "XDP pkt read, pkt_end <= pkt_data', good access",
8139 .insns = {
8140 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8141 offsetof(struct xdp_md, data)),
8142 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8143 offsetof(struct xdp_md, data_end)),
8144 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8146 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8147 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8148 BPF_MOV64_IMM(BPF_REG_0, 0),
8149 BPF_EXIT_INSN(),
8150 },
8151 .result = ACCEPT,
8152 .prog_type = BPF_PROG_TYPE_XDP,
8153 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8154 },
8155 {
8156 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
8157 .insns = {
8158 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8159 offsetof(struct xdp_md, data)),
8160 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8161 offsetof(struct xdp_md, data_end)),
8162 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8164 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8165 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8166 BPF_MOV64_IMM(BPF_REG_0, 0),
8167 BPF_EXIT_INSN(),
8168 },
8169 .errstr = "R1 offset is outside of the packet",
8170 .result = REJECT,
8171 .prog_type = BPF_PROG_TYPE_XDP,
8172 },
8173 {
8174 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
8175 .insns = {
8176 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8177 offsetof(struct xdp_md, data)),
8178 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8179 offsetof(struct xdp_md, data_end)),
8180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8182 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8183 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8184 BPF_MOV64_IMM(BPF_REG_0, 0),
8185 BPF_EXIT_INSN(),
8186 },
8187 .errstr = "R1 offset is outside of the packet",
8188 .result = REJECT,
8189 .prog_type = BPF_PROG_TYPE_XDP,
8190 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8191 },
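	/* Same comparison matrix as above, but with pointers derived from
	 * data_meta checked against data instead of data against data_end.
	 */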
8192 {
8193 "XDP pkt read, pkt_meta' > pkt_data, good access",
8194 .insns = {
8195 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8196 offsetof(struct xdp_md, data_meta)),
8197 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8198 offsetof(struct xdp_md, data)),
8199 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8201 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8202 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8203 BPF_MOV64_IMM(BPF_REG_0, 0),
8204 BPF_EXIT_INSN(),
8205 },
8206 .result = ACCEPT,
8207 .prog_type = BPF_PROG_TYPE_XDP,
8208 },
8209 {
8210 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8211 .insns = {
8212 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8213 offsetof(struct xdp_md, data_meta)),
8214 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8215 offsetof(struct xdp_md, data)),
8216 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8218 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8219 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8220 BPF_MOV64_IMM(BPF_REG_0, 0),
8221 BPF_EXIT_INSN(),
8222 },
8223 .errstr = "R1 offset is outside of the packet",
8224 .result = REJECT,
8225 .prog_type = BPF_PROG_TYPE_XDP,
8226 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8227 },
8228 {
8229 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8230 .insns = {
8231 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8232 offsetof(struct xdp_md, data_meta)),
8233 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8234 offsetof(struct xdp_md, data)),
8235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8237 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8238 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8239 BPF_MOV64_IMM(BPF_REG_0, 0),
8240 BPF_EXIT_INSN(),
8241 },
8242 .errstr = "R1 offset is outside of the packet",
8243 .result = REJECT,
8244 .prog_type = BPF_PROG_TYPE_XDP,
8245 },
8246 {
8247 "XDP pkt read, pkt_data > pkt_meta', good access",
8248 .insns = {
8249 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8250 offsetof(struct xdp_md, data_meta)),
8251 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8252 offsetof(struct xdp_md, data)),
8253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8255 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8256 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8257 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8258 BPF_MOV64_IMM(BPF_REG_0, 0),
8259 BPF_EXIT_INSN(),
8260 },
8261 .result = ACCEPT,
8262 .prog_type = BPF_PROG_TYPE_XDP,
8263 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8264 },
8265 {
8266 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8267 .insns = {
8268 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8269 offsetof(struct xdp_md, data_meta)),
8270 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8271 offsetof(struct xdp_md, data)),
8272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8274 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8275 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8277 BPF_MOV64_IMM(BPF_REG_0, 0),
8278 BPF_EXIT_INSN(),
8279 },
8280 .errstr = "R1 offset is outside of the packet",
8281 .result = REJECT,
8282 .prog_type = BPF_PROG_TYPE_XDP,
8283 },
8284 {
8285 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8286 .insns = {
8287 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8288 offsetof(struct xdp_md, data_meta)),
8289 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8290 offsetof(struct xdp_md, data)),
8291 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8293 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8294 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8295 BPF_MOV64_IMM(BPF_REG_0, 0),
8296 BPF_EXIT_INSN(),
8297 },
8298 .errstr = "R1 offset is outside of the packet",
8299 .result = REJECT,
8300 .prog_type = BPF_PROG_TYPE_XDP,
8301 },
8302 {
8303 "XDP pkt read, pkt_meta' < pkt_data, good access",
8304 .insns = {
8305 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8306 offsetof(struct xdp_md, data_meta)),
8307 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8308 offsetof(struct xdp_md, data)),
8309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8311 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8312 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8313 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8314 BPF_MOV64_IMM(BPF_REG_0, 0),
8315 BPF_EXIT_INSN(),
8316 },
8317 .result = ACCEPT,
8318 .prog_type = BPF_PROG_TYPE_XDP,
8319 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8320 },
8321 {
8322 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8323 .insns = {
8324 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8325 offsetof(struct xdp_md, data_meta)),
8326 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8327 offsetof(struct xdp_md, data)),
8328 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8330 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8331 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8332 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8333 BPF_MOV64_IMM(BPF_REG_0, 0),
8334 BPF_EXIT_INSN(),
8335 },
8336 .errstr = "R1 offset is outside of the packet",
8337 .result = REJECT,
8338 .prog_type = BPF_PROG_TYPE_XDP,
8339 },
8340 {
8341 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8342 .insns = {
8343 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8344 offsetof(struct xdp_md, data_meta)),
8345 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8346 offsetof(struct xdp_md, data)),
8347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8349 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8350 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8351 BPF_MOV64_IMM(BPF_REG_0, 0),
8352 BPF_EXIT_INSN(),
8353 },
8354 .errstr = "R1 offset is outside of the packet",
8355 .result = REJECT,
8356 .prog_type = BPF_PROG_TYPE_XDP,
8357 },
8358 {
8359 "XDP pkt read, pkt_data < pkt_meta', good access",
8360 .insns = {
8361 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8362 offsetof(struct xdp_md, data_meta)),
8363 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8364 offsetof(struct xdp_md, data)),
8365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8367 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8368 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8369 BPF_MOV64_IMM(BPF_REG_0, 0),
8370 BPF_EXIT_INSN(),
8371 },
8372 .result = ACCEPT,
8373 .prog_type = BPF_PROG_TYPE_XDP,
8374 },
8375 {
8376 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8377 .insns = {
8378 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8379 offsetof(struct xdp_md, data_meta)),
8380 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8381 offsetof(struct xdp_md, data)),
8382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8384 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8386 BPF_MOV64_IMM(BPF_REG_0, 0),
8387 BPF_EXIT_INSN(),
8388 },
8389 .errstr = "R1 offset is outside of the packet",
8390 .result = REJECT,
8391 .prog_type = BPF_PROG_TYPE_XDP,
8392 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8393 },
8394 {
8395 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8396 .insns = {
8397 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8398 offsetof(struct xdp_md, data_meta)),
8399 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8400 offsetof(struct xdp_md, data)),
8401 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8403 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8404 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8405 BPF_MOV64_IMM(BPF_REG_0, 0),
8406 BPF_EXIT_INSN(),
8407 },
8408 .errstr = "R1 offset is outside of the packet",
8409 .result = REJECT,
8410 .prog_type = BPF_PROG_TYPE_XDP,
8411 },
8412 {
8413 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8414 .insns = {
8415 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8416 offsetof(struct xdp_md, data_meta)),
8417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8418 offsetof(struct xdp_md, data)),
8419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8421 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8422 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8423 BPF_MOV64_IMM(BPF_REG_0, 0),
8424 BPF_EXIT_INSN(),
8425 },
8426 .result = ACCEPT,
8427 .prog_type = BPF_PROG_TYPE_XDP,
8428 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8429 },
8430 {
8431 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8432 .insns = {
8433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8434 offsetof(struct xdp_md, data_meta)),
8435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8436 offsetof(struct xdp_md, data)),
8437 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8439 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8440 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8441 BPF_MOV64_IMM(BPF_REG_0, 0),
8442 BPF_EXIT_INSN(),
8443 },
8444 .errstr = "R1 offset is outside of the packet",
8445 .result = REJECT,
8446 .prog_type = BPF_PROG_TYPE_XDP,
8447 },
8448 {
8449 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8450 .insns = {
8451 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8452 offsetof(struct xdp_md, data_meta)),
8453 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8454 offsetof(struct xdp_md, data)),
8455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8456 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8457 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8459 BPF_MOV64_IMM(BPF_REG_0, 0),
8460 BPF_EXIT_INSN(),
8461 },
8462 .errstr = "R1 offset is outside of the packet",
8463 .result = REJECT,
8464 .prog_type = BPF_PROG_TYPE_XDP,
8465 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8466 },
8467 {
8468 "XDP pkt read, pkt_data >= pkt_meta', good access",
8469 .insns = {
8470 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8471 offsetof(struct xdp_md, data_meta)),
8472 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8473 offsetof(struct xdp_md, data)),
8474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8476 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8477 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8478 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8479 BPF_MOV64_IMM(BPF_REG_0, 0),
8480 BPF_EXIT_INSN(),
8481 },
8482 .result = ACCEPT,
8483 .prog_type = BPF_PROG_TYPE_XDP,
8484 },
8485 {
8486 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8487 .insns = {
8488 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8489 offsetof(struct xdp_md, data_meta)),
8490 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8491 offsetof(struct xdp_md, data)),
8492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8494 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8495 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8496 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8497 BPF_MOV64_IMM(BPF_REG_0, 0),
8498 BPF_EXIT_INSN(),
8499 },
8500 .errstr = "R1 offset is outside of the packet",
8501 .result = REJECT,
8502 .prog_type = BPF_PROG_TYPE_XDP,
8503 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8504 },
8505 {
8506 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8507 .insns = {
8508 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8509 offsetof(struct xdp_md, data_meta)),
8510 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8511 offsetof(struct xdp_md, data)),
8512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8514 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8515 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8516 BPF_MOV64_IMM(BPF_REG_0, 0),
8517 BPF_EXIT_INSN(),
8518 },
8519 .errstr = "R1 offset is outside of the packet",
8520 .result = REJECT,
8521 .prog_type = BPF_PROG_TYPE_XDP,
8522 },
8523 {
8524 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8525 .insns = {
8526 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8527 offsetof(struct xdp_md, data_meta)),
8528 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8529 offsetof(struct xdp_md, data)),
8530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8532 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8533 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8534 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8535 BPF_MOV64_IMM(BPF_REG_0, 0),
8536 BPF_EXIT_INSN(),
8537 },
8538 .result = ACCEPT,
8539 .prog_type = BPF_PROG_TYPE_XDP,
8540 },
8541 {
8542 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8543 .insns = {
8544 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8545 offsetof(struct xdp_md, data_meta)),
8546 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8547 offsetof(struct xdp_md, data)),
8548 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8550 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8551 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8553 BPF_MOV64_IMM(BPF_REG_0, 0),
8554 BPF_EXIT_INSN(),
8555 },
8556 .errstr = "R1 offset is outside of the packet",
8557 .result = REJECT,
8558 .prog_type = BPF_PROG_TYPE_XDP,
8559 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8560 },
8561 {
8562 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8563 .insns = {
8564 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8565 offsetof(struct xdp_md, data_meta)),
8566 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8567 offsetof(struct xdp_md, data)),
8568 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8570 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8572 BPF_MOV64_IMM(BPF_REG_0, 0),
8573 BPF_EXIT_INSN(),
8574 },
8575 .errstr = "R1 offset is outside of the packet",
8576 .result = REJECT,
8577 .prog_type = BPF_PROG_TYPE_XDP,
8578 },
8579 {
8580 "XDP pkt read, pkt_data <= pkt_meta', good access",
8581 .insns = {
8582 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8583 offsetof(struct xdp_md, data_meta)),
8584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8585 offsetof(struct xdp_md, data)),
8586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8588 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8589 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8590 BPF_MOV64_IMM(BPF_REG_0, 0),
8591 BPF_EXIT_INSN(),
8592 },
8593 .result = ACCEPT,
8594 .prog_type = BPF_PROG_TYPE_XDP,
8595 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8596 },
8597 {
8598 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8599 .insns = {
8600 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8601 offsetof(struct xdp_md, data_meta)),
8602 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8603 offsetof(struct xdp_md, data)),
8604 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8606 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8607 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8608 BPF_MOV64_IMM(BPF_REG_0, 0),
8609 BPF_EXIT_INSN(),
8610 },
8611 .errstr = "R1 offset is outside of the packet",
8612 .result = REJECT,
8613 .prog_type = BPF_PROG_TYPE_XDP,
8614 },
8615 {
8616 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8617 .insns = {
8618 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8619 offsetof(struct xdp_md, data_meta)),
8620 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8621 offsetof(struct xdp_md, data)),
8622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8624 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8625 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8626 BPF_MOV64_IMM(BPF_REG_0, 0),
8627 BPF_EXIT_INSN(),
8628 },
8629 .errstr = "R1 offset is outside of the packet",
8630 .result = REJECT,
8631 .prog_type = BPF_PROG_TYPE_XDP,
8632 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8633 },
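	/* For BPF_PROG_TYPE_CGROUP_SOCK the verifier checks the possible
	 * range of R0 at exit: any return value that may fall outside
	 * [0, 1] is rejected, as the tests below exercise.
	 */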
8634 {
8635 "bpf_exit with invalid return code. test1",
8636 .insns = {
8637 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8638 BPF_EXIT_INSN(),
8639 },
8640 .errstr = "R0 has value (0x0; 0xffffffff)",
8641 .result = REJECT,
8642 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8643 },
8644 {
8645 "bpf_exit with invalid return code. test2",
8646 .insns = {
8647 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8648 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
8649 BPF_EXIT_INSN(),
8650 },
8651 .result = ACCEPT,
8652 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8653 },
8654 {
8655 "bpf_exit with invalid return code. test3",
8656 .insns = {
8657 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8658 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
8659 BPF_EXIT_INSN(),
8660 },
8661 .errstr = "R0 has value (0x0; 0x3)",
8662 .result = REJECT,
8663 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8664 },
8665 {
8666 "bpf_exit with invalid return code. test4",
8667 .insns = {
8668 BPF_MOV64_IMM(BPF_REG_0, 1),
8669 BPF_EXIT_INSN(),
8670 },
8671 .result = ACCEPT,
8672 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8673 },
8674 {
8675 "bpf_exit with invalid return code. test5",
8676 .insns = {
8677 BPF_MOV64_IMM(BPF_REG_0, 2),
8678 BPF_EXIT_INSN(),
8679 },
8680 .errstr = "R0 has value (0x2; 0x0)",
8681 .result = REJECT,
8682 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8683 },
8684 {
8685 "bpf_exit with invalid return code. test6",
8686 .insns = {
8687 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8688 BPF_EXIT_INSN(),
8689 },
8690 .errstr = "R0 is not a known value (ctx)",
8691 .result = REJECT,
8692 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8693 },
8694 {
8695 "bpf_exit with invalid return code. test7",
8696 .insns = {
8697 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8698 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
8699 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
8700 BPF_EXIT_INSN(),
8701 },
8702 .errstr = "R0 has unknown scalar value",
8703 .result = REJECT,
8704 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8705 },
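	/* "calls:" tests exercise bpf-to-bpf function calls. In these insns
	 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, imm) is a pseudo call
	 * (src_reg == 1) whose imm is the callee's offset relative to the
	 * next instruction, like a jump target.
	 */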
8706 {
8707 "calls: basic sanity",
8708 .insns = {
8709 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8710 BPF_MOV64_IMM(BPF_REG_0, 1),
8711 BPF_EXIT_INSN(),
8712 BPF_MOV64_IMM(BPF_REG_0, 2),
8713 BPF_EXIT_INSN(),
8714 },
8715 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8716 .result = ACCEPT,
8717 },
8718 {
8719 "calls: not on unprivileged",
8720 .insns = {
8721 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8722 BPF_MOV64_IMM(BPF_REG_0, 1),
8723 BPF_EXIT_INSN(),
8724 BPF_MOV64_IMM(BPF_REG_0, 2),
8725 BPF_EXIT_INSN(),
8726 },
8727 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
8728 .result_unpriv = REJECT,
8729 .result = ACCEPT,
8730 .retval = 1,
8731 },
8732 {
8733 "calls: overlapping caller/callee",
8734 .insns = {
8735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
8736 BPF_MOV64_IMM(BPF_REG_0, 1),
8737 BPF_EXIT_INSN(),
8738 },
8739 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8740 .errstr = "last insn is not an exit or jmp",
8741 .result = REJECT,
8742 },
8743 {
8744 "calls: wrong recursive calls",
8745 .insns = {
8746 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8747 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8748 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8749 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8750 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8751 BPF_MOV64_IMM(BPF_REG_0, 1),
8752 BPF_EXIT_INSN(),
8753 },
8754 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8755 .errstr = "jump out of range",
8756 .result = REJECT,
8757 },
8758 {
8759 "calls: wrong src reg",
8760 .insns = {
8761 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
8762 BPF_MOV64_IMM(BPF_REG_0, 1),
8763 BPF_EXIT_INSN(),
8764 },
8765 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8766 .errstr = "BPF_CALL uses reserved fields",
8767 .result = REJECT,
8768 },
8769 {
8770 "calls: wrong off value",
8771 .insns = {
8772 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
8773 BPF_MOV64_IMM(BPF_REG_0, 1),
8774 BPF_EXIT_INSN(),
8775 BPF_MOV64_IMM(BPF_REG_0, 2),
8776 BPF_EXIT_INSN(),
8777 },
8778 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8779 .errstr = "BPF_CALL uses reserved fields",
8780 .result = REJECT,
8781 },
8782 {
8783 "calls: jump back loop",
8784 .insns = {
8785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
8786 BPF_MOV64_IMM(BPF_REG_0, 1),
8787 BPF_EXIT_INSN(),
8788 },
8789 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8790 .errstr = "back-edge from insn 0 to 0",
8791 .result = REJECT,
8792 },
8793 {
8794 "calls: conditional call",
8795 .insns = {
8796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8797 offsetof(struct __sk_buff, mark)),
8798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8799 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8800 BPF_MOV64_IMM(BPF_REG_0, 1),
8801 BPF_EXIT_INSN(),
8802 BPF_MOV64_IMM(BPF_REG_0, 2),
8803 BPF_EXIT_INSN(),
8804 },
8805 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8806 .errstr = "jump out of range",
8807 .result = REJECT,
8808 },
8809 {
8810 "calls: conditional call 2",
8811 .insns = {
8812 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8813 offsetof(struct __sk_buff, mark)),
8814 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8815 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8816 BPF_MOV64_IMM(BPF_REG_0, 1),
8817 BPF_EXIT_INSN(),
8818 BPF_MOV64_IMM(BPF_REG_0, 2),
8819 BPF_EXIT_INSN(),
8820 BPF_MOV64_IMM(BPF_REG_0, 3),
8821 BPF_EXIT_INSN(),
8822 },
8823 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8824 .result = ACCEPT,
8825 },
8826 {
8827 "calls: conditional call 3",
8828 .insns = {
8829 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8830 offsetof(struct __sk_buff, mark)),
8831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8832 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8833 BPF_MOV64_IMM(BPF_REG_0, 1),
8834 BPF_EXIT_INSN(),
8835 BPF_MOV64_IMM(BPF_REG_0, 1),
8836 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8837 BPF_MOV64_IMM(BPF_REG_0, 3),
8838 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8839 },
8840 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8841 .errstr = "back-edge from insn",
8842 .result = REJECT,
8843 },
8844 {
8845 "calls: conditional call 4",
8846 .insns = {
8847 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8848 offsetof(struct __sk_buff, mark)),
8849 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8850 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8851 BPF_MOV64_IMM(BPF_REG_0, 1),
8852 BPF_EXIT_INSN(),
8853 BPF_MOV64_IMM(BPF_REG_0, 1),
8854 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
8855 BPF_MOV64_IMM(BPF_REG_0, 3),
8856 BPF_EXIT_INSN(),
8857 },
8858 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8859 .result = ACCEPT,
8860 },
8861 {
8862 "calls: conditional call 5",
8863 .insns = {
8864 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8865 offsetof(struct __sk_buff, mark)),
8866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8867 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8868 BPF_MOV64_IMM(BPF_REG_0, 1),
8869 BPF_EXIT_INSN(),
8870 BPF_MOV64_IMM(BPF_REG_0, 1),
8871 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8872 BPF_MOV64_IMM(BPF_REG_0, 3),
8873 BPF_EXIT_INSN(),
8874 },
8875 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8876 .errstr = "back-edge from insn",
8877 .result = REJECT,
8878 },
8879 {
8880 "calls: conditional call 6",
8881 .insns = {
8882 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
8884 BPF_EXIT_INSN(),
8885 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8886 offsetof(struct __sk_buff, mark)),
8887 BPF_EXIT_INSN(),
8888 },
8889 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8890 .errstr = "back-edge from insn",
8891 .result = REJECT,
8892 },
8893 {
8894 "calls: using r0 returned by callee",
8895 .insns = {
8896 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8897 BPF_EXIT_INSN(),
8898 BPF_MOV64_IMM(BPF_REG_0, 2),
8899 BPF_EXIT_INSN(),
8900 },
8901 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8902 .result = ACCEPT,
8903 },
8904 {
8905 "calls: using uninit r0 from callee",
8906 .insns = {
8907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8908 BPF_EXIT_INSN(),
8909 BPF_EXIT_INSN(),
8910 },
8911 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8912 .errstr = "!read_ok",
8913 .result = REJECT,
8914 },
8915 {
8916 "calls: callee is using r1",
8917 .insns = {
8918 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8919 BPF_EXIT_INSN(),
8920 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8921 offsetof(struct __sk_buff, len)),
8922 BPF_EXIT_INSN(),
8923 },
8924 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
8925 .result = ACCEPT,
8926 .retval = TEST_DATA_LEN,
8927 },
8928 {
8929 "calls: callee using args1",
8930 .insns = {
8931 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8932 BPF_EXIT_INSN(),
8933 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8934 BPF_EXIT_INSN(),
8935 },
8936 .errstr_unpriv = "allowed for root only",
8937 .result_unpriv = REJECT,
8938 .result = ACCEPT,
8939 .retval = POINTER_VALUE,
8940 },
8941 {
8942 "calls: callee using wrong args2",
8943 .insns = {
8944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8945 BPF_EXIT_INSN(),
8946 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8947 BPF_EXIT_INSN(),
8948 },
8949 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8950 .errstr = "R2 !read_ok",
8951 .result = REJECT,
8952 },
8953 {
8954 "calls: callee using two args",
8955 .insns = {
8956 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8957 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
8958 offsetof(struct __sk_buff, len)),
8959 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
8960 offsetof(struct __sk_buff, len)),
8961 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8962 BPF_EXIT_INSN(),
8963 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8964 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8965 BPF_EXIT_INSN(),
8966 },
8967 .errstr_unpriv = "allowed for root only",
8968 .result_unpriv = REJECT,
8969 .result = ACCEPT,
8970 .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
8971 },
8972 {
8973 "calls: callee changing pkt pointers",
8974 .insns = {
8975 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8976 offsetof(struct xdp_md, data)),
8977 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
8978 offsetof(struct xdp_md, data_end)),
8979 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
8980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
8981 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
8982 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8983 /* clear_all_pkt_pointers() has to walk all frames
8984 * to make sure that pkt pointers in the caller
8985 * are cleared when callee is calling a helper that
8986 * adjusts packet size
8987 */
8988 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
8989 BPF_MOV32_IMM(BPF_REG_0, 0),
8990 BPF_EXIT_INSN(),
8991 BPF_MOV64_IMM(BPF_REG_2, 0),
8992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8993 BPF_FUNC_xdp_adjust_head),
8994 BPF_EXIT_INSN(),
8995 },
8996 .result = REJECT,
8997 .errstr = "R6 invalid mem access 'inv'",
8998 .prog_type = BPF_PROG_TYPE_XDP,
8999 },
9000 {
9001 "calls: two calls with args",
9002 .insns = {
9003 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9004 BPF_EXIT_INSN(),
9005 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9006 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9007 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9008 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9010 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9011 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9012 BPF_EXIT_INSN(),
9013 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9014 offsetof(struct __sk_buff, len)),
9015 BPF_EXIT_INSN(),
9016 },
9017 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9018 .result = ACCEPT,
9019 .retval = TEST_DATA_LEN + TEST_DATA_LEN,
9020 },
9021 {
9022 "calls: calls with stack arith",
9023 .insns = {
9024 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9026 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9027 BPF_EXIT_INSN(),
9028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9029 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9030 BPF_EXIT_INSN(),
9031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
9032 BPF_MOV64_IMM(BPF_REG_0, 42),
9033 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9034 BPF_EXIT_INSN(),
9035 },
9036 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9037 .result = ACCEPT,
9038 .retval = 42,
9039 },
9040 {
9041 "calls: calls with misaligned stack access",
9042 .insns = {
9043 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9046 BPF_EXIT_INSN(),
9047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
9048 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9049 BPF_EXIT_INSN(),
9050 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
9051 BPF_MOV64_IMM(BPF_REG_0, 42),
9052 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
9053 BPF_EXIT_INSN(),
9054 },
9055 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9056 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
9057 .errstr = "misaligned stack access",
9058 .result = REJECT,
9059 },
9060 {
9061 "calls: calls control flow, jump test",
9062 .insns = {
9063 BPF_MOV64_IMM(BPF_REG_0, 42),
9064 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9065 BPF_MOV64_IMM(BPF_REG_0, 43),
9066 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9067 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9068 BPF_EXIT_INSN(),
9069 },
9070 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9071 .result = ACCEPT,
9072 .retval = 43,
9073 },
9074 {
9075 "calls: calls control flow, jump test 2",
9076 .insns = {
9077 BPF_MOV64_IMM(BPF_REG_0, 42),
9078 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9079 BPF_MOV64_IMM(BPF_REG_0, 43),
9080 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9082 BPF_EXIT_INSN(),
9083 },
9084 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9085 .errstr = "jump out of range from insn 1 to 4",
9086 .result = REJECT,
9087 },
9088 {
9089 "calls: two calls with bad jump",
9090 .insns = {
9091 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9092 BPF_EXIT_INSN(),
9093 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9094 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9095 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9096 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9097 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9098 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9100 BPF_EXIT_INSN(),
9101 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9102 offsetof(struct __sk_buff, len)),
9103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
9104 BPF_EXIT_INSN(),
9105 },
9106 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9107 .errstr = "jump out of range from insn 11 to 9",
9108 .result = REJECT,
9109 },
9110 {
9111 "calls: recursive call. test1",
9112 .insns = {
9113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9114 BPF_EXIT_INSN(),
9115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
9116 BPF_EXIT_INSN(),
9117 },
9118 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9119 .errstr = "back-edge",
9120 .result = REJECT,
9121 },
9122 {
9123 "calls: recursive call. test2",
9124 .insns = {
9125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9126 BPF_EXIT_INSN(),
9127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
9128 BPF_EXIT_INSN(),
9129 },
9130 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9131 .errstr = "back-edge",
9132 .result = REJECT,
9133 },
9134 {
9135 "calls: unreachable code",
9136 .insns = {
9137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9138 BPF_EXIT_INSN(),
9139 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9140 BPF_EXIT_INSN(),
9141 BPF_MOV64_IMM(BPF_REG_0, 0),
9142 BPF_EXIT_INSN(),
9143 BPF_MOV64_IMM(BPF_REG_0, 0),
9144 BPF_EXIT_INSN(),
9145 },
9146 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9147 .errstr = "unreachable insn 6",
9148 .result = REJECT,
9149 },
9150 {
9151 "calls: invalid call",
9152 .insns = {
9153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9154 BPF_EXIT_INSN(),
9155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
9156 BPF_EXIT_INSN(),
9157 },
9158 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9159 .errstr = "invalid destination",
9160 .result = REJECT,
9161 },
9162 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009163 "calls: invalid call 2",
9164 .insns = {
9165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9166 BPF_EXIT_INSN(),
9167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
9168 BPF_EXIT_INSN(),
9169 },
9170 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9171 .errstr = "invalid destination",
9172 .result = REJECT,
9173 },
9174 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009175 "calls: jumping across function bodies. test1",
9176 .insns = {
9177 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9178 BPF_MOV64_IMM(BPF_REG_0, 0),
9179 BPF_EXIT_INSN(),
9180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
9181 BPF_EXIT_INSN(),
9182 },
9183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9184 .errstr = "jump out of range",
9185 .result = REJECT,
9186 },
9187 {
9188 "calls: jumping across function bodies. test2",
9189 .insns = {
9190 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
9191 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9192 BPF_MOV64_IMM(BPF_REG_0, 0),
9193 BPF_EXIT_INSN(),
9194 BPF_EXIT_INSN(),
9195 },
9196 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9197 .errstr = "jump out of range",
9198 .result = REJECT,
9199 },
9200 {
9201 "calls: call without exit",
9202 .insns = {
9203 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9204 BPF_EXIT_INSN(),
9205 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9206 BPF_EXIT_INSN(),
9207 BPF_MOV64_IMM(BPF_REG_0, 0),
9208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
9209 },
9210 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9211 .errstr = "not an exit",
9212 .result = REJECT,
9213 },
9214 {
9215 "calls: call into middle of ld_imm64",
9216 .insns = {
9217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9218 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9219 BPF_MOV64_IMM(BPF_REG_0, 0),
9220 BPF_EXIT_INSN(),
9221 BPF_LD_IMM64(BPF_REG_0, 0),
9222 BPF_EXIT_INSN(),
9223 },
9224 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9225 .errstr = "last insn",
9226 .result = REJECT,
9227 },
9228 {
9229 "calls: call into middle of other call",
9230 .insns = {
9231 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9232 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9233 BPF_MOV64_IMM(BPF_REG_0, 0),
9234 BPF_EXIT_INSN(),
9235 BPF_MOV64_IMM(BPF_REG_0, 0),
9236 BPF_MOV64_IMM(BPF_REG_0, 0),
9237 BPF_EXIT_INSN(),
9238 },
9239 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9240 .errstr = "last insn",
9241 .result = REJECT,
9242 },
9243 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009244 "calls: ld_abs with changing ctx data in callee",
9245 .insns = {
9246 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9247 BPF_LD_ABS(BPF_B, 0),
9248 BPF_LD_ABS(BPF_H, 0),
9249 BPF_LD_ABS(BPF_W, 0),
9250 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
9251 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9252 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
9253 BPF_LD_ABS(BPF_B, 0),
9254 BPF_LD_ABS(BPF_H, 0),
9255 BPF_LD_ABS(BPF_W, 0),
9256 BPF_EXIT_INSN(),
9257 BPF_MOV64_IMM(BPF_REG_2, 1),
9258 BPF_MOV64_IMM(BPF_REG_3, 2),
9259 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9260 BPF_FUNC_skb_vlan_push),
9261 BPF_EXIT_INSN(),
9262 },
9263 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9264 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
9265 .result = REJECT,
9266 },
9267 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009268 "calls: two calls with bad fallthrough",
9269 .insns = {
9270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9271 BPF_EXIT_INSN(),
9272 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9273 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9274 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9275 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9277 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9278 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9279 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
9280 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9281 offsetof(struct __sk_buff, len)),
9282 BPF_EXIT_INSN(),
9283 },
9284 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9285 .errstr = "not an exit",
9286 .result = REJECT,
9287 },
9288 {
9289 "calls: two calls with stack read",
9290 .insns = {
9291 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9294 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9295 BPF_EXIT_INSN(),
9296 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
9298 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
9299 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9301 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
9302 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
9303 BPF_EXIT_INSN(),
9304 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9305 BPF_EXIT_INSN(),
9306 },
9307 .prog_type = BPF_PROG_TYPE_XDP,
9308 .result = ACCEPT,
9309 },
9310 {
9311 "calls: two calls with stack write",
9312 .insns = {
9313 /* main prog */
9314 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9315 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9320 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9321 BPF_EXIT_INSN(),
9322
9323 /* subprog 1 */
9324 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9325 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9326 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
9327 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
9328 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9329 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9330 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
9331 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
9332 /* write into stack frame of main prog */
9333 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9334 BPF_EXIT_INSN(),
9335
9336 /* subprog 2 */
9337 /* read from stack frame of main prog */
9338 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9339 BPF_EXIT_INSN(),
9340 },
9341 .prog_type = BPF_PROG_TYPE_XDP,
9342 .result = ACCEPT,
9343 },
9344 {
Jann Horn6b80ad22017-12-22 19:12:35 +01009345 "calls: stack overflow using two frames (pre-call access)",
9346 .insns = {
9347 /* prog 1 */
9348 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9349 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
9350 BPF_EXIT_INSN(),
9351
9352 /* prog 2 */
9353 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9354 BPF_MOV64_IMM(BPF_REG_0, 0),
9355 BPF_EXIT_INSN(),
9356 },
9357 .prog_type = BPF_PROG_TYPE_XDP,
9358 .errstr = "combined stack size",
9359 .result = REJECT,
9360 },
9361 {
9362 "calls: stack overflow using two frames (post-call access)",
9363 .insns = {
9364 /* prog 1 */
9365 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
9366 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9367 BPF_EXIT_INSN(),
9368
9369 /* prog 2 */
9370 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9371 BPF_MOV64_IMM(BPF_REG_0, 0),
9372 BPF_EXIT_INSN(),
9373 },
9374 .prog_type = BPF_PROG_TYPE_XDP,
9375 .errstr = "combined stack size",
9376 .result = REJECT,
9377 },
9378 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -08009379 "calls: stack depth check using three frames. test1",
9380 .insns = {
9381 /* main */
9382 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9383 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9384 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9385 BPF_MOV64_IMM(BPF_REG_0, 0),
9386 BPF_EXIT_INSN(),
9387 /* A */
9388 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9389 BPF_EXIT_INSN(),
9390 /* B */
9391 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9392 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9393 BPF_EXIT_INSN(),
9394 },
9395 .prog_type = BPF_PROG_TYPE_XDP,
9396 /* stack_main=32, stack_A=256, stack_B=64
9397 * and max(main+A, main+A+B) < 512
9398 */
9399 .result = ACCEPT,
9400 },
9401 {
9402 "calls: stack depth check using three frames. test2",
9403 .insns = {
9404 /* main */
9405 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9406 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
9407 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
9408 BPF_MOV64_IMM(BPF_REG_0, 0),
9409 BPF_EXIT_INSN(),
9410 /* A */
9411 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9412 BPF_EXIT_INSN(),
9413 /* B */
9414 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
9415 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9416 BPF_EXIT_INSN(),
9417 },
9418 .prog_type = BPF_PROG_TYPE_XDP,
9419 /* stack_main=32, stack_A=64, stack_B=256
9420 * and max(main+A, main+A+B) < 512
9421 */
9422 .result = ACCEPT,
9423 },
9424 {
9425 "calls: stack depth check using three frames. test3",
9426 .insns = {
9427 /* main */
9428 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9429 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
9430 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9431 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
9432 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
9433 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
9434 BPF_MOV64_IMM(BPF_REG_0, 0),
9435 BPF_EXIT_INSN(),
9436 /* A */
9437 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
9438 BPF_EXIT_INSN(),
9439 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
9440 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
9441 /* B */
9442 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
9443 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
9444 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
9445 BPF_EXIT_INSN(),
9446 },
9447 .prog_type = BPF_PROG_TYPE_XDP,
9448 /* stack_main=64, stack_A=224, stack_B=256
9449 * and max(main+A, main+A+B) > 512
9450 */
9451 .errstr = "combined stack",
9452 .result = REJECT,
9453 },
9454 {
9455 "calls: stack depth check using three frames. test4",
9456 /* void main(void) {
9457 * func1(0);
9458 * func1(1);
9459 * func2(1);
9460 * }
9461 * void func1(int alloc_or_recurse) {
9462 * if (alloc_or_recurse) {
9463 * frame_pointer[-300] = 1;
9464 * } else {
9465 * func2(alloc_or_recurse);
9466 * }
9467 * }
9468 * void func2(int alloc_or_recurse) {
9469 * if (alloc_or_recurse) {
9470 * frame_pointer[-300] = 1;
9471 * }
9472 * }
9473 */
9474 .insns = {
9475 /* main */
9476 BPF_MOV64_IMM(BPF_REG_1, 0),
9477 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
9478 BPF_MOV64_IMM(BPF_REG_1, 1),
9479 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
9480 BPF_MOV64_IMM(BPF_REG_1, 1),
9481 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
9482 BPF_MOV64_IMM(BPF_REG_0, 0),
9483 BPF_EXIT_INSN(),
9484 /* A */
9485 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
9486 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9487 BPF_EXIT_INSN(),
9488 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
9489 BPF_EXIT_INSN(),
9490 /* B */
9491 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
9492 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
9493 BPF_EXIT_INSN(),
9494 },
9495 .prog_type = BPF_PROG_TYPE_XDP,
9496 .result = REJECT,
9497 .errstr = "combined stack",
9498 },
9499 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -08009500 "calls: stack depth check using three frames. test5",
9501 .insns = {
9502 /* main */
9503 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
9504 BPF_EXIT_INSN(),
9505 /* A */
9506 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
9507 BPF_EXIT_INSN(),
9508 /* B */
9509 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
9510 BPF_EXIT_INSN(),
9511 /* C */
9512 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
9513 BPF_EXIT_INSN(),
9514 /* D */
9515 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
9516 BPF_EXIT_INSN(),
9517 /* E */
9518 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
9519 BPF_EXIT_INSN(),
9520 /* F */
9521 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
9522 BPF_EXIT_INSN(),
9523 /* G */
9524 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
9525 BPF_EXIT_INSN(),
9526 /* H */
9527 BPF_MOV64_IMM(BPF_REG_0, 0),
9528 BPF_EXIT_INSN(),
9529 },
9530 .prog_type = BPF_PROG_TYPE_XDP,
9531 .errstr = "call stack",
9532 .result = REJECT,
9533 },
9534 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009535 "calls: spill into caller stack frame",
9536 .insns = {
9537 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9538 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9541 BPF_EXIT_INSN(),
9542 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
9543 BPF_MOV64_IMM(BPF_REG_0, 0),
9544 BPF_EXIT_INSN(),
9545 },
9546 .prog_type = BPF_PROG_TYPE_XDP,
9547 .errstr = "cannot spill",
9548 .result = REJECT,
9549 },
9550 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009551 "calls: write into caller stack frame",
9552 .insns = {
9553 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9555 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9557 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9558 BPF_EXIT_INSN(),
9559 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
9560 BPF_MOV64_IMM(BPF_REG_0, 0),
9561 BPF_EXIT_INSN(),
9562 },
9563 .prog_type = BPF_PROG_TYPE_XDP,
9564 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -08009565 .retval = 42,
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009566 },
9567 {
9568 "calls: write into callee stack frame",
9569 .insns = {
9570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9571 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
9572 BPF_EXIT_INSN(),
9573 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9574 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
9575 BPF_EXIT_INSN(),
9576 },
9577 .prog_type = BPF_PROG_TYPE_XDP,
9578 .errstr = "cannot return stack pointer",
9579 .result = REJECT,
9580 },
9581 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08009582 "calls: two calls with stack write and void return",
9583 .insns = {
9584 /* main prog */
9585 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9588 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9590 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9591 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9592 BPF_EXIT_INSN(),
9593
9594 /* subprog 1 */
9595 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9596 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9600 BPF_EXIT_INSN(),
9601
9602 /* subprog 2 */
9603 /* write into stack frame of main prog */
9604 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
9605 BPF_EXIT_INSN(), /* void return */
9606 },
9607 .prog_type = BPF_PROG_TYPE_XDP,
9608 .result = ACCEPT,
9609 },
9610 {
9611 "calls: ambiguous return value",
9612 .insns = {
9613 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9614 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9616 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9618 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9619 BPF_EXIT_INSN(),
9620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
9621 BPF_MOV64_IMM(BPF_REG_0, 0),
9622 BPF_EXIT_INSN(),
9623 },
9624 .errstr_unpriv = "allowed for root only",
9625 .result_unpriv = REJECT,
9626 .errstr = "R0 !read_ok",
9627 .result = REJECT,
9628 },
9629 {
9630 "calls: two calls that return map_value",
9631 .insns = {
9632 /* main prog */
9633 /* pass fp-16, fp-8 into a function */
9634 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9636 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9639
9640 /* fetch map_value_ptr from the stack of this function */
9641 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9642 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
9643 /* write into map value */
9644 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9645		/* fetch second map_value_ptr from the stack */
9646 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
9648 /* write into map value */
9649 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9650 BPF_MOV64_IMM(BPF_REG_0, 0),
9651 BPF_EXIT_INSN(),
9652
9653 /* subprog 1 */
9654 /* call 3rd function twice */
9655 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9656 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9657 /* first time with fp-8 */
9658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9659 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9660 /* second time with fp-16 */
9661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9662 BPF_EXIT_INSN(),
9663
9664 /* subprog 2 */
9665 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9666 /* lookup from map */
9667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9670 BPF_LD_MAP_FD(BPF_REG_1, 0),
9671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9672 BPF_FUNC_map_lookup_elem),
9673 /* write map_value_ptr into stack frame of main prog */
9674 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9675 BPF_MOV64_IMM(BPF_REG_0, 0),
9676 BPF_EXIT_INSN(), /* return 0 */
9677 },
9678 .prog_type = BPF_PROG_TYPE_XDP,
9679 .fixup_map1 = { 23 },
9680 .result = ACCEPT,
9681 },
9682 {
9683 "calls: two calls that return map_value with bool condition",
9684 .insns = {
9685 /* main prog */
9686 /* pass fp-16, fp-8 into a function */
9687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9689 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9691 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9692 BPF_MOV64_IMM(BPF_REG_0, 0),
9693 BPF_EXIT_INSN(),
9694
9695 /* subprog 1 */
9696 /* call 3rd function twice */
9697 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9698 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9699 /* first time with fp-8 */
9700 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
9701 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9702 /* fetch map_value_ptr from the stack of this function */
9703 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9704 /* write into map value */
9705 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9706 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9707 /* second time with fp-16 */
9708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9709 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9710		/* fetch second map_value_ptr from the stack */
9711 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
9712 /* write into map value */
9713 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9714 BPF_EXIT_INSN(),
9715
9716 /* subprog 2 */
9717 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9718 /* lookup from map */
9719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9722 BPF_LD_MAP_FD(BPF_REG_1, 0),
9723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9724 BPF_FUNC_map_lookup_elem),
9725 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9726 BPF_MOV64_IMM(BPF_REG_0, 0),
9727 BPF_EXIT_INSN(), /* return 0 */
9728 /* write map_value_ptr into stack frame of main prog */
9729 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9730 BPF_MOV64_IMM(BPF_REG_0, 1),
9731 BPF_EXIT_INSN(), /* return 1 */
9732 },
9733 .prog_type = BPF_PROG_TYPE_XDP,
9734 .fixup_map1 = { 23 },
9735 .result = ACCEPT,
9736 },
9737 {
9738 "calls: two calls that return map_value with incorrect bool check",
9739 .insns = {
9740 /* main prog */
9741 /* pass fp-16, fp-8 into a function */
9742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9744 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9746 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9747 BPF_MOV64_IMM(BPF_REG_0, 0),
9748 BPF_EXIT_INSN(),
9749
9750 /* subprog 1 */
9751 /* call 3rd function twice */
9752 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9753 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9754 /* first time with fp-8 */
9755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
9756 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9757 /* fetch map_value_ptr from the stack of this function */
9758 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9759 /* write into map value */
9760 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9762 /* second time with fp-16 */
9763 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9764 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9765		/* fetch second map_value_ptr from the stack */
9766 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
9767 /* write into map value */
9768 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9769 BPF_EXIT_INSN(),
9770
9771 /* subprog 2 */
9772 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9773 /* lookup from map */
9774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9777 BPF_LD_MAP_FD(BPF_REG_1, 0),
9778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9779 BPF_FUNC_map_lookup_elem),
9780 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9781 BPF_MOV64_IMM(BPF_REG_0, 0),
9782 BPF_EXIT_INSN(), /* return 0 */
9783 /* write map_value_ptr into stack frame of main prog */
9784 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9785 BPF_MOV64_IMM(BPF_REG_0, 1),
9786 BPF_EXIT_INSN(), /* return 1 */
9787 },
9788 .prog_type = BPF_PROG_TYPE_XDP,
9789 .fixup_map1 = { 23 },
9790 .result = REJECT,
9791 .errstr = "invalid read from stack off -16+0 size 8",
9792 },
9793 {
9794 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
9795 .insns = {
9796 /* main prog */
9797 /* pass fp-16, fp-8 into a function */
9798 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9802 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9803 BPF_MOV64_IMM(BPF_REG_0, 0),
9804 BPF_EXIT_INSN(),
9805
9806 /* subprog 1 */
9807 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9808 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9809 /* 1st lookup from map */
9810 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9811 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9813 BPF_LD_MAP_FD(BPF_REG_1, 0),
9814 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9815 BPF_FUNC_map_lookup_elem),
9816 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9817 BPF_MOV64_IMM(BPF_REG_8, 0),
9818 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9819 /* write map_value_ptr into stack frame of main prog at fp-8 */
9820 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9821 BPF_MOV64_IMM(BPF_REG_8, 1),
9822
9823 /* 2nd lookup from map */
9824 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
9825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9826 BPF_LD_MAP_FD(BPF_REG_1, 0),
9827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
9828 BPF_FUNC_map_lookup_elem),
9829 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9830 BPF_MOV64_IMM(BPF_REG_9, 0),
9831 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9832 /* write map_value_ptr into stack frame of main prog at fp-16 */
9833 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9834 BPF_MOV64_IMM(BPF_REG_9, 1),
9835
9836 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9838 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9839 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9840 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9841 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
9842 BPF_EXIT_INSN(),
9843
9844 /* subprog 2 */
9845 /* if arg2 == 1 do *arg1 = 0 */
9846 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9847 /* fetch map_value_ptr from the stack of this function */
9848 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9849 /* write into map value */
9850 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9851
9852 /* if arg4 == 1 do *arg3 = 0 */
9853 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9854 /* fetch map_value_ptr from the stack of this function */
9855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9856 /* write into map value */
9857 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
9858 BPF_EXIT_INSN(),
9859 },
9860 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9861 .fixup_map1 = { 12, 22 },
9862 .result = REJECT,
9863 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
9864 },
9865 {
9866 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
9867 .insns = {
9868 /* main prog */
9869 /* pass fp-16, fp-8 into a function */
9870 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9874 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9875 BPF_MOV64_IMM(BPF_REG_0, 0),
9876 BPF_EXIT_INSN(),
9877
9878 /* subprog 1 */
9879 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9880 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9881 /* 1st lookup from map */
9882 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9883 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9884 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9885 BPF_LD_MAP_FD(BPF_REG_1, 0),
9886 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9887 BPF_FUNC_map_lookup_elem),
9888 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9889 BPF_MOV64_IMM(BPF_REG_8, 0),
9890 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9891 /* write map_value_ptr into stack frame of main prog at fp-8 */
9892 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9893 BPF_MOV64_IMM(BPF_REG_8, 1),
9894
9895 /* 2nd lookup from map */
9896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
9897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9898 BPF_LD_MAP_FD(BPF_REG_1, 0),
9899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
9900 BPF_FUNC_map_lookup_elem),
9901 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9902 BPF_MOV64_IMM(BPF_REG_9, 0),
9903 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9904 /* write map_value_ptr into stack frame of main prog at fp-16 */
9905 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9906 BPF_MOV64_IMM(BPF_REG_9, 1),
9907
9908 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9909 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9911 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9912 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9913 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
9914 BPF_EXIT_INSN(),
9915
9916 /* subprog 2 */
9917 /* if arg2 == 1 do *arg1 = 0 */
9918 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9919 /* fetch map_value_ptr from the stack of this function */
9920 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9921 /* write into map value */
9922 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9923
9924 /* if arg4 == 1 do *arg3 = 0 */
9925 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9926 /* fetch map_value_ptr from the stack of this function */
9927 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9928 /* write into map value */
9929 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9930 BPF_EXIT_INSN(),
9931 },
9932 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9933 .fixup_map1 = { 12, 22 },
9934 .result = ACCEPT,
9935 },
9936 {
9937 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
9938 .insns = {
9939 /* main prog */
9940 /* pass fp-16, fp-8 into a function */
9941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9945 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
9946 BPF_MOV64_IMM(BPF_REG_0, 0),
9947 BPF_EXIT_INSN(),
9948
9949 /* subprog 1 */
9950 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9951 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9952 /* 1st lookup from map */
9953 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
9954 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
9956 BPF_LD_MAP_FD(BPF_REG_1, 0),
9957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9958 BPF_FUNC_map_lookup_elem),
9959 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9960 BPF_MOV64_IMM(BPF_REG_8, 0),
9961 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9962 /* write map_value_ptr into stack frame of main prog at fp-8 */
9963 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9964 BPF_MOV64_IMM(BPF_REG_8, 1),
9965
9966 /* 2nd lookup from map */
9967 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
9969 BPF_LD_MAP_FD(BPF_REG_1, 0),
9970 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9971 BPF_FUNC_map_lookup_elem),
9972 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9973		BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
9974 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9975 /* write map_value_ptr into stack frame of main prog at fp-16 */
9976 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9977 BPF_MOV64_IMM(BPF_REG_9, 1),
9978
9979 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9980		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9981 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9982 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9983 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9984		BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
9985 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
9986
9987 /* subprog 2 */
9988 /* if arg2 == 1 do *arg1 = 0 */
9989 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9990 /* fetch map_value_ptr from the stack of this function */
9991 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9992 /* write into map value */
9993 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9994
9995 /* if arg4 == 1 do *arg3 = 0 */
9996 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9997 /* fetch map_value_ptr from the stack of this function */
9998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9999 /* write into map value */
10000 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
10001 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
10002 },
10003 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10004 .fixup_map1 = { 12, 22 },
10005 .result = REJECT,
10006 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
10007 },
10008 {
10009 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
10010 .insns = {
10011 /* main prog */
10012 /* pass fp-16, fp-8 into a function */
10013 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10015 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10018 BPF_MOV64_IMM(BPF_REG_0, 0),
10019 BPF_EXIT_INSN(),
10020
10021 /* subprog 1 */
10022 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10023 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10024 /* 1st lookup from map */
10025 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10026 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10027 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10028 BPF_LD_MAP_FD(BPF_REG_1, 0),
10029 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10030 BPF_FUNC_map_lookup_elem),
10031 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10032 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10033 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10034 BPF_MOV64_IMM(BPF_REG_8, 0),
10035 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10036 BPF_MOV64_IMM(BPF_REG_8, 1),
10037
10038 /* 2nd lookup from map */
10039 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10041 BPF_LD_MAP_FD(BPF_REG_1, 0),
10042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10043 BPF_FUNC_map_lookup_elem),
10044 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10045 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10046 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10047 BPF_MOV64_IMM(BPF_REG_9, 0),
10048 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10049 BPF_MOV64_IMM(BPF_REG_9, 1),
10050
10051 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10052 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10054 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10055 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10056 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10057 BPF_EXIT_INSN(),
10058
10059 /* subprog 2 */
10060 /* if arg2 == 1 do *arg1 = 0 */
10061 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10062 /* fetch map_value_ptr from the stack of this function */
10063 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10064 /* write into map value */
10065 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10066
10067 /* if arg4 == 1 do *arg3 = 0 */
10068 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
10069 /* fetch map_value_ptr from the stack of this function */
10070 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10071 /* write into map value */
10072 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10073 BPF_EXIT_INSN(),
10074 },
10075 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10076 .fixup_map1 = { 12, 22 },
10077 .result = ACCEPT,
10078 },
10079 {
10080 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
10081 .insns = {
10082 /* main prog */
10083 /* pass fp-16, fp-8 into a function */
10084 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10086 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10088 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10089 BPF_MOV64_IMM(BPF_REG_0, 0),
10090 BPF_EXIT_INSN(),
10091
10092 /* subprog 1 */
10093 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10094 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10095 /* 1st lookup from map */
10096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10097 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10099 BPF_LD_MAP_FD(BPF_REG_1, 0),
10100 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10101 BPF_FUNC_map_lookup_elem),
10102 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10103 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10104 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10105 BPF_MOV64_IMM(BPF_REG_8, 0),
10106 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10107 BPF_MOV64_IMM(BPF_REG_8, 1),
10108
10109 /* 2nd lookup from map */
10110 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10112 BPF_LD_MAP_FD(BPF_REG_1, 0),
10113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10114 BPF_FUNC_map_lookup_elem),
10115 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
10116 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10117 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
10118 BPF_MOV64_IMM(BPF_REG_9, 0),
10119 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10120 BPF_MOV64_IMM(BPF_REG_9, 1),
10121
10122 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
10123 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
10125 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
10126 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
10127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10128 BPF_EXIT_INSN(),
10129
10130 /* subprog 2 */
10131 /* if arg2 == 1 do *arg1 = 0 */
10132 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
10133 /* fetch map_value_ptr from the stack of this function */
10134 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
10135 /* write into map value */
10136 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10137
10138 /* if arg4 == 0 do *arg3 = 0 */
10139 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
10140 /* fetch map_value_ptr from the stack of this function */
10141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
10142 /* write into map value */
10143 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
10144 BPF_EXIT_INSN(),
10145 },
10146 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10147 .fixup_map1 = { 12, 22 },
10148 .result = REJECT,
10149 .errstr = "R0 invalid mem access 'inv'",
10150 },
10151 {
10152 "calls: pkt_ptr spill into caller stack",
10153 .insns = {
10154 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10157 BPF_EXIT_INSN(),
10158
10159 /* subprog 1 */
10160 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10161 offsetof(struct __sk_buff, data)),
10162 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10163 offsetof(struct __sk_buff, data_end)),
10164 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10165 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10166 /* spill unchecked pkt_ptr into stack of caller */
10167 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10168 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10169 /* now the pkt range is verified, read pkt_ptr from stack */
10170 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10171 /* write 4 bytes into packet */
10172 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10173 BPF_EXIT_INSN(),
10174 },
10175 .result = ACCEPT,
10176 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010177 .retval = POINTER_VALUE,
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -080010178 },
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080010179 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010180 "calls: pkt_ptr spill into caller stack 2",
10181 .insns = {
10182 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10184 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10185 /* Marking is still kept, but not in all cases safe. */
10186 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10187 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10188 BPF_EXIT_INSN(),
10189
10190 /* subprog 1 */
10191 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10192 offsetof(struct __sk_buff, data)),
10193 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10194 offsetof(struct __sk_buff, data_end)),
10195 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10197 /* spill unchecked pkt_ptr into stack of caller */
10198 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10199 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10200 /* now the pkt range is verified, read pkt_ptr from stack */
10201 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10202 /* write 4 bytes into packet */
10203 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10204 BPF_EXIT_INSN(),
10205 },
10206 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10207 .errstr = "invalid access to packet",
10208 .result = REJECT,
10209 },
10210 {
10211 "calls: pkt_ptr spill into caller stack 3",
10212 .insns = {
10213 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10215 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10216 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10217 /* Marking is still kept and safe here. */
10218 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10219 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10220 BPF_EXIT_INSN(),
10221
10222 /* subprog 1 */
10223 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10224 offsetof(struct __sk_buff, data)),
10225 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10226 offsetof(struct __sk_buff, data_end)),
10227 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10229 /* spill unchecked pkt_ptr into stack of caller */
10230 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10231 BPF_MOV64_IMM(BPF_REG_5, 0),
10232 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10233 BPF_MOV64_IMM(BPF_REG_5, 1),
10234 /* now the pkt range is verified, read pkt_ptr from stack */
10235 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
10236 /* write 4 bytes into packet */
10237 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10238 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10239 BPF_EXIT_INSN(),
10240 },
10241 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10242 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010243 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010244 },
10245 {
10246 "calls: pkt_ptr spill into caller stack 4",
10247 .insns = {
10248 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10249 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10250 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
10252 /* Check marking propagated. */
10253 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10254 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
10255 BPF_EXIT_INSN(),
10256
10257 /* subprog 1 */
10258 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10259 offsetof(struct __sk_buff, data)),
10260 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10261 offsetof(struct __sk_buff, data_end)),
10262 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10264 /* spill unchecked pkt_ptr into stack of caller */
10265 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10266 BPF_MOV64_IMM(BPF_REG_5, 0),
10267 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10268 BPF_MOV64_IMM(BPF_REG_5, 1),
10269 /* don't read back pkt_ptr from stack here */
10270 /* write 4 bytes into packet */
10271 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10272 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10273 BPF_EXIT_INSN(),
10274 },
10275 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10276 .result = ACCEPT,
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010277 .retval = 1,
Daniel Borkmann28ab1732017-12-14 17:55:17 -080010278 },
10279 {
10280 "calls: pkt_ptr spill into caller stack 5",
10281 .insns = {
10282 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10284 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
10285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10286 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10287 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10288 BPF_EXIT_INSN(),
10289
10290 /* subprog 1 */
10291 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10292 offsetof(struct __sk_buff, data)),
10293 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10294 offsetof(struct __sk_buff, data_end)),
10295 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10297 BPF_MOV64_IMM(BPF_REG_5, 0),
10298 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10299 /* spill checked pkt_ptr into stack of caller */
10300 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10301 BPF_MOV64_IMM(BPF_REG_5, 1),
10302 /* don't read back pkt_ptr from stack here */
10303 /* write 4 bytes into packet */
10304 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10305 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10306 BPF_EXIT_INSN(),
10307 },
10308 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10309 .errstr = "same insn cannot be used with different",
10310 .result = REJECT,
10311 },
10312 {
10313 "calls: pkt_ptr spill into caller stack 6",
10314 .insns = {
10315 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10316 offsetof(struct __sk_buff, data_end)),
10317 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10319 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10321 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10322 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10323 BPF_EXIT_INSN(),
10324
10325 /* subprog 1 */
10326 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10327 offsetof(struct __sk_buff, data)),
10328 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10329 offsetof(struct __sk_buff, data_end)),
10330 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10332 BPF_MOV64_IMM(BPF_REG_5, 0),
10333 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10334 /* spill checked pkt_ptr into stack of caller */
10335 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10336 BPF_MOV64_IMM(BPF_REG_5, 1),
10337 /* don't read back pkt_ptr from stack here */
10338 /* write 4 bytes into packet */
10339 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10340 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10341 BPF_EXIT_INSN(),
10342 },
10343 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10344 .errstr = "R4 invalid mem access",
10345 .result = REJECT,
10346 },
10347 {
10348 "calls: pkt_ptr spill into caller stack 7",
10349 .insns = {
10350 BPF_MOV64_IMM(BPF_REG_2, 0),
10351 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10353 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10355 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10356 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10357 BPF_EXIT_INSN(),
10358
10359 /* subprog 1 */
10360 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10361 offsetof(struct __sk_buff, data)),
10362 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10363 offsetof(struct __sk_buff, data_end)),
10364 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10366 BPF_MOV64_IMM(BPF_REG_5, 0),
10367 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10368 /* spill checked pkt_ptr into stack of caller */
10369 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10370 BPF_MOV64_IMM(BPF_REG_5, 1),
10371 /* don't read back pkt_ptr from stack here */
10372 /* write 4 bytes into packet */
10373 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10374 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10375 BPF_EXIT_INSN(),
10376 },
10377 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10378 .errstr = "R4 invalid mem access",
10379 .result = REJECT,
10380 },
10381 {
10382 "calls: pkt_ptr spill into caller stack 8",
10383 .insns = {
10384 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10385 offsetof(struct __sk_buff, data)),
10386 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10387 offsetof(struct __sk_buff, data_end)),
10388 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10390 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10391 BPF_EXIT_INSN(),
10392 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10394 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10395 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10396 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10397 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10398 BPF_EXIT_INSN(),
10399
10400 /* subprog 1 */
10401 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10402 offsetof(struct __sk_buff, data)),
10403 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10404 offsetof(struct __sk_buff, data_end)),
10405 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10407 BPF_MOV64_IMM(BPF_REG_5, 0),
10408 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
10409 /* spill checked pkt_ptr into stack of caller */
10410 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10411 BPF_MOV64_IMM(BPF_REG_5, 1),
10412 /* don't read back pkt_ptr from stack here */
10413 /* write 4 bytes into packet */
10414 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10415 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10416 BPF_EXIT_INSN(),
10417 },
10418 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10419 .result = ACCEPT,
10420 },
10421 {
10422 "calls: pkt_ptr spill into caller stack 9",
10423 .insns = {
10424 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10425 offsetof(struct __sk_buff, data)),
10426 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10427 offsetof(struct __sk_buff, data_end)),
10428 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10430 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
10431 BPF_EXIT_INSN(),
10432 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
10433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
10434 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10435 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10436 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
10437 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
10438 BPF_EXIT_INSN(),
10439
10440 /* subprog 1 */
10441 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10442 offsetof(struct __sk_buff, data)),
10443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10444 offsetof(struct __sk_buff, data_end)),
10445 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
10447 BPF_MOV64_IMM(BPF_REG_5, 0),
10448 /* spill unchecked pkt_ptr into stack of caller */
10449 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
10450 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
10451 BPF_MOV64_IMM(BPF_REG_5, 1),
10452 /* don't read back pkt_ptr from stack here */
10453 /* write 4 bytes into packet */
10454 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
10455 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
10456 BPF_EXIT_INSN(),
10457 },
10458 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
10459 .errstr = "invalid access to packet",
10460 .result = REJECT,
10461 },
10462 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -080010463 "calls: caller stack init to zero or map_value_or_null",
10464 .insns = {
10465 BPF_MOV64_IMM(BPF_REG_0, 0),
10466 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
10467 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10468 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10470 /* fetch map_value_or_null or const_zero from stack */
10471 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
10472 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
10473 /* store into map_value */
10474 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
10475 BPF_EXIT_INSN(),
10476
10477 /* subprog 1 */
10478 /* if (ctx == 0) return; */
10479 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
10480 /* else bpf_map_lookup() and *(fp - 8) = r0 */
10481 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
10482 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10484 BPF_LD_MAP_FD(BPF_REG_1, 0),
10485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10487 BPF_FUNC_map_lookup_elem),
10488 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
10489 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
10490 BPF_EXIT_INSN(),
10491 },
10492 .fixup_map1 = { 13 },
10493 .result = ACCEPT,
10494 .prog_type = BPF_PROG_TYPE_XDP,
10495 },
10496 {
10497 "calls: stack init to zero and pruning",
10498 .insns = {
10499		/* first make allocated_stack 16 bytes */
10500 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
10501 /* now fork the execution such that the false branch
10502		 * of the JGT insn is verified second and skips the zero
10503		 * init of the fp-8 stack slot. If stack liveness marking
10504		 * is missing live_read marks from the map_lookup call
10505		 * processing, then pruning will incorrectly assume
10506		 * that the fp-8 stack slot was unused in the fall-through
10507		 * branch and will accept the program incorrectly
10508 */
10509 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
10510 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10511 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
10512 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10514 BPF_LD_MAP_FD(BPF_REG_1, 0),
10515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10516 BPF_FUNC_map_lookup_elem),
10517 BPF_EXIT_INSN(),
10518 },
10519 .fixup_map2 = { 6 },
10520 .errstr = "invalid indirect read from stack off -8+0 size 8",
10521 .result = REJECT,
10522 .prog_type = BPF_PROG_TYPE_XDP,
10523 },
Gianluca Borellofd05e572017-12-23 10:09:55 +000010524 {
10525 "search pruning: all branches should be verified (nop operation)",
10526 .insns = {
10527 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10529 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
10530 BPF_LD_MAP_FD(BPF_REG_1, 0),
10531 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
10532 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
10533 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
10534 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
10535 BPF_MOV64_IMM(BPF_REG_4, 0),
10536 BPF_JMP_A(1),
10537 BPF_MOV64_IMM(BPF_REG_4, 1),
10538 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
10539 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
10540 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
10541 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
10542 BPF_MOV64_IMM(BPF_REG_6, 0),
10543 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
10544 BPF_EXIT_INSN(),
10545 },
10546 .fixup_map1 = { 3 },
10547 .errstr = "R6 invalid mem access 'inv'",
10548 .result = REJECT,
10549 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10550 },
10551 {
10552 "search pruning: all branches should be verified (invalid stack access)",
10553 .insns = {
10554 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10556 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
10557 BPF_LD_MAP_FD(BPF_REG_1, 0),
10558 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
10559 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
10560 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
10561 BPF_MOV64_IMM(BPF_REG_4, 0),
10562 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
10563 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
10564 BPF_JMP_A(1),
10565 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
10566 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
10567 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
10568 BPF_EXIT_INSN(),
10569 },
10570 .fixup_map1 = { 3 },
10571 .errstr = "invalid read from stack off -16+0 size 8",
10572 .result = REJECT,
10573 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
10574 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010575};
10576
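/* Determine the length of a test program by scanning backwards from
 * MAX_INSNS for the last non-zero instruction.
 */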
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010577static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010578{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010579 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010580
10581 for (len = MAX_INSNS - 1; len > 0; --len)
10582 if (fp[len].code != 0 || fp[len].imm != 0)
10583 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010584 return len + 1;
10585}
10586
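/* Create a BPF_F_NO_PREALLOC hash map keyed by long long with the
 * requested value size and number of elements.
 */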
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010587static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010588{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010589 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010590
Mickaël Salaünf4874d02017-02-10 00:21:43 +010010591 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010592 size_value, max_elem, BPF_F_NO_PREALLOC);
10593 if (fd < 0)
10594 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010595
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010596 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010597}
10598
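/* Create a four-slot prog array for tests that use the fixup_prog offsets. */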
10599static int create_prog_array(void)
10600{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010601 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010602
Mickaël Salaünf4874d02017-02-10 00:21:43 +010010603 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010604 sizeof(int), 4, 0);
10605 if (fd < 0)
10606 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010607
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010608 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010609}
10610
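/* Create an array-of-maps whose inner map is a one-element int array;
 * the inner map fd only serves as a template and is closed afterwards.
 */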
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010611static int create_map_in_map(void)
10612{
10613 int inner_map_fd, outer_map_fd;
10614
10615 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
10616 sizeof(int), 1, 0);
10617 if (inner_map_fd < 0) {
10618 printf("Failed to create array '%s'!\n", strerror(errno));
10619 return inner_map_fd;
10620 }
10621
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070010622 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010623 sizeof(int), inner_map_fd, 1, 0);
10624 if (outer_map_fd < 0)
10625 printf("Failed to create array of maps '%s'!\n",
10626 strerror(errno));
10627
10628 close(inner_map_fd);
10629
10630 return outer_map_fd;
10631}
10632
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010633static char bpf_vlog[32768];
10634
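/* Create the maps a test case needs and patch their fds into the test
 * instructions at the offsets listed in the per-test fixup arrays.
 */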
10635static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010636 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010637{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010638 int *fixup_map1 = test->fixup_map1;
10639 int *fixup_map2 = test->fixup_map2;
10640 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010641 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010642
10643 /* Allocating HTs with 1 elem is fine here, since we only test
10644	 * the verifier and do not do a runtime lookup, so the only thing
10645	 * that really matters is the value size in this case.
10646 */
10647 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010648 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010649 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010650 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010651 fixup_map1++;
10652 } while (*fixup_map1);
10653 }
10654
10655 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010656 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010657 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010658 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010659 fixup_map2++;
10660 } while (*fixup_map2);
10661 }
10662
10663 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010664 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010665 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010666 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010667 fixup_prog++;
10668 } while (*fixup_prog);
10669 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010670
10671 if (*fixup_map_in_map) {
10672 map_fds[3] = create_map_in_map();
10673 do {
10674 prog[*fixup_map_in_map].imm = map_fds[3];
10675 fixup_map_in_map++;
10676 } while (*fixup_map_in_map);
10677 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010678}
10679
10680static void do_test_single(struct bpf_test *test, bool unpriv,
10681 int *passes, int *errors)
10682{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010683 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010684 struct bpf_insn *prog = test->insns;
10685 int prog_len = probe_filter_length(prog);
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010686 char data_in[TEST_DATA_LEN] = {};
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010687 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010688 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010689 const char *expected_err;
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010690 uint32_t retval;
10691 int i, err;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010692
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010693 for (i = 0; i < MAX_NR_MAPS; i++)
10694 map_fds[i] = -1;
10695
10696 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010697
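	/* Load the program, defaulting to BPF_PROG_TYPE_SOCKET_FILTER when the
	 * test does not specify a type; strict alignment checking is requested
	 * only for tests flagged F_LOAD_WITH_STRICT_ALIGNMENT.
	 */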
Daniel Borkmann614d0d72017-05-25 01:05:09 +020010698 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
10699 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020010700 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010701
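	/* Unprivileged runs may declare their own expected verdict and error
	 * string; otherwise fall back to the privileged expectations.
	 */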
10702 expected_ret = unpriv && test->result_unpriv != UNDEF ?
10703 test->result_unpriv : test->result;
10704 expected_err = unpriv && test->errstr_unpriv ?
10705 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010706
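	/* A load rejected only because the host lacks efficient unaligned access
	 * is tolerated for tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	 * unless the build advertises CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS,
	 * in which case such a rejection counts as a real failure.
	 */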
10707 reject_from_alignment = fd_prog < 0 &&
10708 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
10709 strstr(bpf_vlog, "Unknown alignment.");
10710#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
10711 if (reject_from_alignment) {
10712 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
10713 strerror(errno));
10714 goto fail_log;
10715 }
10716#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010717 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010718 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010719 printf("FAIL\nFailed to load prog '%s'!\n",
10720 strerror(errno));
10721 goto fail_log;
10722 }
10723 } else {
10724 if (fd_prog >= 0) {
10725 			printf("FAIL\nUnexpected success loading prog!\n");
10726 goto fail_log;
10727 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010728 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010729 printf("FAIL\nUnexpected error message!\n");
10730 goto fail_log;
10731 }
10732 }
10733
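	/* Successfully loaded programs are run once on zeroed packet data and
	 * their return value is compared against the expected one. ENOTSUPP
	 * (524) and EPERM are tolerated, since not every program type or kernel
	 * supports bpf_prog_test_run; tests whose result is a real pointer use
	 * POINTER_VALUE to skip the comparison.
	 */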
Alexei Starovoitov111e6b42018-01-17 16:52:03 -080010734 if (fd_prog >= 0) {
10735 err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
10736 NULL, NULL, &retval, NULL);
10737 if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
10738 printf("Unexpected bpf_prog_test_run error\n");
10739 goto fail_log;
10740 }
10741 if (!err && retval != test->retval &&
10742 test->retval != POINTER_VALUE) {
10743 			printf("FAIL retval %u != %u\n", retval, test->retval);
10744 goto fail_log;
10745 }
10746 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010747 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010748 printf("OK%s\n", reject_from_alignment ?
10749 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010750close_fds:
10751 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010752 for (i = 0; i < MAX_NR_MAPS; i++)
10753 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010754 sched_yield();
10755 return;
10756fail_log:
10757 (*errors)++;
10758 printf("%s", bpf_vlog);
10759 goto close_fds;
10760}
10761
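/* The privileged test variants require CAP_SYS_ADMIN in the effective set;
 * anything else is treated as an unprivileged run.
 */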
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010762static bool is_admin(void)
10763{
10764 cap_t caps;
10765 cap_flag_value_t sysadmin = CAP_CLEAR;
10766 const cap_value_t cap_val = CAP_SYS_ADMIN;
10767
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080010768#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010769 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
10770 		perror("CAP_IS_SUPPORTED");
10771 return false;
10772 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080010773#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010774 caps = cap_get_proc();
10775 if (!caps) {
10776 perror("cap_get_proc");
10777 return false;
10778 }
10779 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
10780 perror("cap_get_flag");
10781 if (cap_free(caps))
10782 perror("cap_free");
10783 return (sysadmin == CAP_SET);
10784}
10785
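/* Raise or drop CAP_SYS_ADMIN in the effective set so that a root invocation
 * can also exercise the unprivileged test variants.
 */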
10786static int set_admin(bool admin)
10787{
10788 cap_t caps;
10789 const cap_value_t cap_val = CAP_SYS_ADMIN;
10790 int ret = -1;
10791
10792 caps = cap_get_proc();
10793 if (!caps) {
10794 perror("cap_get_proc");
10795 return -1;
10796 }
10797 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
10798 admin ? CAP_SET : CAP_CLEAR)) {
10799 perror("cap_set_flag");
10800 goto out;
10801 }
10802 if (cap_set_proc(caps)) {
10803 perror("cap_set_proc");
10804 goto out;
10805 }
10806 ret = 0;
10807out:
10808 if (cap_free(caps))
10809 perror("cap_free");
10810 return ret;
10811}
10812
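/* Walk tests [from, to): tests using the default socket filter program type
 * are run as unprivileged ("/u"), temporarily dropping CAP_SYS_ADMIN when the
 * caller is root; when the caller is root, every test additionally gets a
 * privileged ("/p") run.
 */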
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010813static int do_test(bool unpriv, unsigned int from, unsigned int to)
10814{
10815 int i, passes = 0, errors = 0;
10816
10817 for (i = from; i < to; i++) {
10818 struct bpf_test *test = &tests[i];
10819
10820 		/* Program types that non-root cannot load are skipped right
10821 		 * away for the unprivileged run.
10822 		 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010823 if (!test->prog_type) {
10824 if (!unpriv)
10825 set_admin(false);
10826 printf("#%d/u %s ", i, test->descr);
10827 do_test_single(test, true, &passes, &errors);
10828 if (!unpriv)
10829 set_admin(true);
10830 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010831
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010832 if (!unpriv) {
10833 printf("#%d/p %s ", i, test->descr);
10834 do_test_single(test, false, &passes, &errors);
10835 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010836 }
10837
10838 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020010839 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010840}
10841
10842int main(int argc, char **argv)
10843{
10844 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
10845 struct rlimit rlim = { 1 << 20, 1 << 20 };
10846 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010847 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010848
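	/* Optional arguments narrow the run to a subrange of tests: two numbers
	 * select an inclusive range, a single number runs just that one test
	 * (e.g. "./test_verifier 5 10" or "./test_verifier 7", assuming the
	 * usual test_verifier binary name).
	 */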
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010849 if (argc == 3) {
10850 unsigned int l = atoi(argv[argc - 2]);
10851 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010852
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010853 if (l < to && u < to) {
10854 from = l;
10855 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010856 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010857 } else if (argc == 2) {
10858 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010859
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010860 if (t < to) {
10861 from = t;
10862 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010863 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010864 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010865
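	/* Map and program memory is charged against RLIMIT_MEMLOCK: privileged
	 * runs lift the limit entirely, unprivileged runs keep a fixed 1 MiB cap.
	 */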
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010866 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
10867 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010868}