/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

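/*
 * Editor's note (not part of the original source): these two flags are
 * consumed by the test runner later in this file (not shown in this
 * excerpt).  As used here, F_NEEDS_EFFICIENT_UNALIGNED_ACCESS restricts a
 * test's expected outcome to architectures where
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set above, while
 * F_LOAD_WITH_STRICT_ALIGNMENT asks for the program to be loaded with
 * strict alignment checking enabled (the BPF_F_STRICT_ALIGNMENT load flag).
 */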
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
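	/*
	 * Editor's note (not part of the original source): the ld_imm64
	 * tests below build the instruction by hand because
	 * BPF_LD | BPF_IMM | BPF_DW is a two-slot (16-byte) instruction:
	 * the first struct bpf_insn slot carries the lower 32 bits of the
	 * immediate and the second "pseudo" slot carries the upper 32 bits,
	 * with all of its other fields required to be zero.  Corrupting
	 * either half is what the following rejects exercise.
	 */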
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "not an exit",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 subtraction from stack pointer",
		.result_unpriv = REJECT,
		.errstr = "R1 invalid mem access",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				    BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"valid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"invalid access of tc_classid for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of skb->mark for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->mark is not writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->tc_index is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->priority is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, priority)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet read for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
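	/*
	 * Editor's note (not part of the original source): the direct
	 * packet access tests above and below follow the pattern the
	 * verifier expects: load skb->data and skb->data_end, compute
	 * data + N, and branch around the access unless data + N is
	 * proven to be <= data_end.  Only on the fall-through path,
	 * where that bound holds, is dereferencing the packet pointer
	 * accepted.
	 */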
	{
		"direct packet write for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"overlapping checks for direct packet access SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"__sk_buff->hash, offset 0, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"__sk_buff->tc_index, offset 3, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 3),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash byte load not permitted 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 3),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: half",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: half, unaligned",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check __sk_buff->hash, offset 0, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->tc_index, offset 2, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash half load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash half load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
1690 .result = REJECT,
1691 },
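	/*
	 * skb->ifindex is a 32-bit field, so 64-bit (BPF_DW) loads and
	 * stores of it are expected to be rejected as invalid context
	 * accesses.
	 */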
1692 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001693 "check __sk_buff->ifindex dw store not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001694 .insns = {
1695 BPF_MOV64_IMM(BPF_REG_0, 0),
Yonghong Song31fd8582017-06-13 15:52:13 -07001696 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1697 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001698 BPF_EXIT_INSN(),
1699 },
1700 .errstr = "invalid bpf_context access",
1701 .result = REJECT,
1702 },
1703 {
Yonghong Song31fd8582017-06-13 15:52:13 -07001704 "check __sk_buff->ifindex dw load not permitted",
Daniel Borkmann62c79892017-01-12 11:51:33 +01001705 .insns = {
1706 BPF_MOV64_IMM(BPF_REG_0, 0),
1707 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
Yonghong Song31fd8582017-06-13 15:52:13 -07001708 offsetof(struct __sk_buff, ifindex)),
Daniel Borkmann62c79892017-01-12 11:51:33 +01001709 BPF_EXIT_INSN(),
1710 },
1711 .errstr = "invalid bpf_context access",
1712 .result = REJECT,
1713 },
1714 {
1715 "check cb access: double, wrong type",
1716 .insns = {
1717 BPF_MOV64_IMM(BPF_REG_0, 0),
1718 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1719 offsetof(struct __sk_buff, cb[0])),
1720 BPF_EXIT_INSN(),
1721 },
1722 .errstr = "invalid bpf_context access",
1723 .result = REJECT,
1724 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001725 },
1726 {
1727 "check out of range skb->cb access",
1728 .insns = {
1729 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001730 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001731 BPF_EXIT_INSN(),
1732 },
1733 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001734 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001735 .result = REJECT,
1736 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1737 },
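	/*
	 * Writable skb fields: cb[] can be written from a plain socket
	 * filter, but storing a pointer (R1) there must fail the
	 * unprivileged run with a leak error; the tc_cls_act variant also
	 * writes mark and tc_index, which the SCHED_CLS program type
	 * permits.
	 */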
1738 {
1739 "write skb fields from socket prog",
1740 .insns = {
1741 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1742 offsetof(struct __sk_buff, cb[4])),
1743 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1745 offsetof(struct __sk_buff, mark)),
1746 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1747 offsetof(struct __sk_buff, tc_index)),
1748 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1749 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1750 offsetof(struct __sk_buff, cb[0])),
1751 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1752 offsetof(struct __sk_buff, cb[2])),
1753 BPF_EXIT_INSN(),
1754 },
1755 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001756 .errstr_unpriv = "R1 leaks addr",
1757 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001758 },
1759 {
1760 "write skb fields from tc_cls_act prog",
1761 .insns = {
1762 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1763 offsetof(struct __sk_buff, cb[0])),
1764 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1765 offsetof(struct __sk_buff, mark)),
1766 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1767 offsetof(struct __sk_buff, tc_index)),
1768 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1769 offsetof(struct __sk_buff, tc_index)),
1770 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1771 offsetof(struct __sk_buff, cb[3])),
1772 BPF_EXIT_INSN(),
1773 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001774 .errstr_unpriv = "",
1775 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001776 .result = ACCEPT,
1777 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1778 },
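	/*
	 * PTR_TO_STACK arithmetic: fp-relative stores/loads must stay
	 * within the stack, and with strict alignment requested the
	 * combined register and insn offset must be naturally aligned
	 * for the access size.
	 */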
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001779 {
1780 "PTR_TO_STACK store/load",
1781 .insns = {
1782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1784 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1785 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1786 BPF_EXIT_INSN(),
1787 },
1788 .result = ACCEPT,
1789 },
1790 {
1791 "PTR_TO_STACK store/load - bad alignment on off",
1792 .insns = {
1793 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1795 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1796 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1797 BPF_EXIT_INSN(),
1798 },
1799 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001800 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1801 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001802 },
1803 {
1804 "PTR_TO_STACK store/load - bad alignment on reg",
1805 .insns = {
1806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1808 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1809 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1810 BPF_EXIT_INSN(),
1811 },
1812 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001813 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1814 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001815 },
1816 {
1817 "PTR_TO_STACK store/load - out of bounds low",
1818 .insns = {
1819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1820 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1821 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1822 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1823 BPF_EXIT_INSN(),
1824 },
1825 .result = REJECT,
1826 .errstr = "invalid stack off=-79992 size=8",
1827 },
1828 {
1829 "PTR_TO_STACK store/load - out of bounds high",
1830 .insns = {
1831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1833 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1834 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1835 BPF_EXIT_INSN(),
1836 },
1837 .result = REJECT,
1838 .errstr = "invalid stack off=0 size=8",
1839 },
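	/*
	 * "unpriv:" tests are run both unprivileged and privileged;
	 * result_unpriv/errstr_unpriv give the expected unprivileged
	 * outcome, where leaking kernel pointers and most pointer
	 * arithmetic and comparisons are refused.
	 */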
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001840 {
1841 "unpriv: return pointer",
1842 .insns = {
1843 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1844 BPF_EXIT_INSN(),
1845 },
1846 .result = ACCEPT,
1847 .result_unpriv = REJECT,
1848 .errstr_unpriv = "R0 leaks addr",
1849 },
1850 {
1851 "unpriv: add const to pointer",
1852 .insns = {
1853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1854 BPF_MOV64_IMM(BPF_REG_0, 0),
1855 BPF_EXIT_INSN(),
1856 },
1857 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001858 },
1859 {
1860 "unpriv: add pointer to pointer",
1861 .insns = {
1862 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1863 BPF_MOV64_IMM(BPF_REG_0, 0),
1864 BPF_EXIT_INSN(),
1865 },
1866 .result = ACCEPT,
1867 .result_unpriv = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001868 .errstr_unpriv = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001869 },
1870 {
1871 "unpriv: neg pointer",
1872 .insns = {
1873 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1874 BPF_MOV64_IMM(BPF_REG_0, 0),
1875 BPF_EXIT_INSN(),
1876 },
1877 .result = ACCEPT,
1878 .result_unpriv = REJECT,
1879 .errstr_unpriv = "R1 pointer arithmetic",
1880 },
1881 {
1882 "unpriv: cmp pointer with const",
1883 .insns = {
1884 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1885 BPF_MOV64_IMM(BPF_REG_0, 0),
1886 BPF_EXIT_INSN(),
1887 },
1888 .result = ACCEPT,
1889 .result_unpriv = REJECT,
1890 .errstr_unpriv = "R1 pointer comparison",
1891 },
1892 {
1893 "unpriv: cmp pointer with pointer",
1894 .insns = {
1895 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1896 BPF_MOV64_IMM(BPF_REG_0, 0),
1897 BPF_EXIT_INSN(),
1898 },
1899 .result = ACCEPT,
1900 .result_unpriv = REJECT,
1901 .errstr_unpriv = "R10 pointer comparison",
1902 },
1903 {
1904 "unpriv: check that printk is disallowed",
1905 .insns = {
1906 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1909 BPF_MOV64_IMM(BPF_REG_2, 8),
1910 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001911 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1912 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001913 BPF_MOV64_IMM(BPF_REG_0, 0),
1914 BPF_EXIT_INSN(),
1915 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001916 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001917 .result_unpriv = REJECT,
1918 .result = ACCEPT,
1919 },
1920 {
1921 "unpriv: pass pointer to helper function",
1922 .insns = {
1923 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1924 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1926 BPF_LD_MAP_FD(BPF_REG_1, 0),
1927 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1928 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001929 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1930 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001931 BPF_MOV64_IMM(BPF_REG_0, 0),
1932 BPF_EXIT_INSN(),
1933 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001934 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001935 .errstr_unpriv = "R4 leaks addr",
1936 .result_unpriv = REJECT,
1937 .result = ACCEPT,
1938 },
1939 {
1940 "unpriv: indirectly pass pointer on stack to helper function",
1941 .insns = {
1942 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1945 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1947 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001948 BPF_MOV64_IMM(BPF_REG_0, 0),
1949 BPF_EXIT_INSN(),
1950 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001951 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001952 .errstr = "invalid indirect read from stack off -8+0 size 8",
1953 .result = REJECT,
1954 },
1955 {
1956 "unpriv: mangle pointer on stack 1",
1957 .insns = {
1958 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1959 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1960 BPF_MOV64_IMM(BPF_REG_0, 0),
1961 BPF_EXIT_INSN(),
1962 },
1963 .errstr_unpriv = "attempt to corrupt spilled",
1964 .result_unpriv = REJECT,
1965 .result = ACCEPT,
1966 },
1967 {
1968 "unpriv: mangle pointer on stack 2",
1969 .insns = {
1970 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1971 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1972 BPF_MOV64_IMM(BPF_REG_0, 0),
1973 BPF_EXIT_INSN(),
1974 },
1975 .errstr_unpriv = "attempt to corrupt spilled",
1976 .result_unpriv = REJECT,
1977 .result = ACCEPT,
1978 },
1979 {
1980 "unpriv: read pointer from stack in small chunks",
1981 .insns = {
1982 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1983 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1984 BPF_MOV64_IMM(BPF_REG_0, 0),
1985 BPF_EXIT_INSN(),
1986 },
1987 .errstr = "invalid size",
1988 .result = REJECT,
1989 },
1990 {
1991 "unpriv: write pointer into ctx",
1992 .insns = {
1993 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1994 BPF_MOV64_IMM(BPF_REG_0, 0),
1995 BPF_EXIT_INSN(),
1996 },
1997 .errstr_unpriv = "R1 leaks addr",
1998 .result_unpriv = REJECT,
1999 .errstr = "invalid bpf_context access",
2000 .result = REJECT,
2001 },
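	/*
	 * Spill/fill of the context pointer: spilling R1 to the stack
	 * and filling it back preserves its ctx type (variants 1 and 2),
	 * while clobbering the spill slot with fp or a scalar first
	 * (variants 3 and 4) leaves R1 with the wrong type for the
	 * helper call.
	 */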
2002 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002003 "unpriv: spill/fill of ctx",
2004 .insns = {
2005 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2007 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2008 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2009 BPF_MOV64_IMM(BPF_REG_0, 0),
2010 BPF_EXIT_INSN(),
2011 },
2012 .result = ACCEPT,
2013 },
2014 {
2015 "unpriv: spill/fill of ctx 2",
2016 .insns = {
2017 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2019 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2020 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002021 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2022 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002023 BPF_EXIT_INSN(),
2024 },
2025 .result = ACCEPT,
2026 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2027 },
2028 {
2029 "unpriv: spill/fill of ctx 3",
2030 .insns = {
2031 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2033 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2034 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2035 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002036 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2037 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002038 BPF_EXIT_INSN(),
2039 },
2040 .result = REJECT,
2041 .errstr = "R1 type=fp expected=ctx",
2042 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2043 },
2044 {
2045 "unpriv: spill/fill of ctx 4",
2046 .insns = {
2047 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2048 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2049 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2050 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002051 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2052 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002053 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002054 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2055 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002056 BPF_EXIT_INSN(),
2057 },
2058 .result = REJECT,
2059 .errstr = "R1 type=inv expected=ctx",
2060 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2061 },
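	/*
	 * A single load/store insn that may see operands of different
	 * pointer types depending on the path taken (stack vs. ctx,
	 * stack vs. perf event ctx) cannot be translated consistently
	 * and must be rejected.
	 */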
2062 {
2063 "unpriv: spill/fill of different pointers stx",
2064 .insns = {
2065 BPF_MOV64_IMM(BPF_REG_3, 42),
2066 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2068 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2071 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2072 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2073 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2074 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2075 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2076 offsetof(struct __sk_buff, mark)),
2077 BPF_MOV64_IMM(BPF_REG_0, 0),
2078 BPF_EXIT_INSN(),
2079 },
2080 .result = REJECT,
2081 .errstr = "same insn cannot be used with different pointers",
2082 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2083 },
2084 {
2085 "unpriv: spill/fill of different pointers ldx",
2086 .insns = {
2087 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2089 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2090 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2092 -(__s32)offsetof(struct bpf_perf_event_data,
2093 sample_period) - 8),
2094 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2095 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2096 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2097 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2098 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2099 offsetof(struct bpf_perf_event_data,
2100 sample_period)),
2101 BPF_MOV64_IMM(BPF_REG_0, 0),
2102 BPF_EXIT_INSN(),
2103 },
2104 .result = REJECT,
2105 .errstr = "same insn cannot be used with different pointers",
2106 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2107 },
2108 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002109 "unpriv: write pointer into map elem value",
2110 .insns = {
2111 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2112 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2114 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2116 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002117 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2118 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2119 BPF_EXIT_INSN(),
2120 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002121 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002122 .errstr_unpriv = "R0 leaks addr",
2123 .result_unpriv = REJECT,
2124 .result = ACCEPT,
2125 },
2126 {
2127 "unpriv: partial copy of pointer",
2128 .insns = {
2129 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2130 BPF_MOV64_IMM(BPF_REG_0, 0),
2131 BPF_EXIT_INSN(),
2132 },
2133 .errstr_unpriv = "R10 partial copy",
2134 .result_unpriv = REJECT,
2135 .result = ACCEPT,
2136 },
2137 {
2138 "unpriv: pass pointer to tail_call",
2139 .insns = {
2140 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2141 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2143 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002144 BPF_MOV64_IMM(BPF_REG_0, 0),
2145 BPF_EXIT_INSN(),
2146 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002147 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002148 .errstr_unpriv = "R3 leaks addr into helper",
2149 .result_unpriv = REJECT,
2150 .result = ACCEPT,
2151 },
2152 {
2153 "unpriv: cmp map pointer with zero",
2154 .insns = {
2155 BPF_MOV64_IMM(BPF_REG_1, 0),
2156 BPF_LD_MAP_FD(BPF_REG_1, 0),
2157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2158 BPF_MOV64_IMM(BPF_REG_0, 0),
2159 BPF_EXIT_INSN(),
2160 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002161 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002162 .errstr_unpriv = "R1 pointer comparison",
2163 .result_unpriv = REJECT,
2164 .result = ACCEPT,
2165 },
2166 {
2167 "unpriv: write into frame pointer",
2168 .insns = {
2169 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2170 BPF_MOV64_IMM(BPF_REG_0, 0),
2171 BPF_EXIT_INSN(),
2172 },
2173 .errstr = "frame pointer is read only",
2174 .result = REJECT,
2175 },
2176 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002177 "unpriv: spill/fill frame pointer",
2178 .insns = {
2179 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2181 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2182 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2183 BPF_MOV64_IMM(BPF_REG_0, 0),
2184 BPF_EXIT_INSN(),
2185 },
2186 .errstr = "frame pointer is read only",
2187 .result = REJECT,
2188 },
2189 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002190 "unpriv: cmp of frame pointer",
2191 .insns = {
2192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2193 BPF_MOV64_IMM(BPF_REG_0, 0),
2194 BPF_EXIT_INSN(),
2195 },
2196 .errstr_unpriv = "R10 pointer comparison",
2197 .result_unpriv = REJECT,
2198 .result = ACCEPT,
2199 },
2200 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002201 "unpriv: adding of fp",
2202 .insns = {
2203 BPF_MOV64_IMM(BPF_REG_0, 0),
2204 BPF_MOV64_IMM(BPF_REG_1, 0),
2205 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2206 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2207 BPF_EXIT_INSN(),
2208 },
Edward Creef65b1842017-08-07 15:27:12 +01002209 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002210 },
2211 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002212 "unpriv: cmp of stack pointer",
2213 .insns = {
2214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2216 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2217 BPF_MOV64_IMM(BPF_REG_0, 0),
2218 BPF_EXIT_INSN(),
2219 },
2220 .errstr_unpriv = "R2 pointer comparison",
2221 .result_unpriv = REJECT,
2222 .result = ACCEPT,
2223 },
2224 {
Yonghong Song332270f2017-04-29 22:52:42 -07002225 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002226 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002227 BPF_MOV64_IMM(BPF_REG_1, 4),
2228 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2229 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2233 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2234 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2235 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2237 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002238 BPF_MOV64_IMM(BPF_REG_0, 0),
2239 BPF_EXIT_INSN(),
2240 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002241 .result = ACCEPT,
2242 },
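	/*
	 * raw_stack tests: bpf_skb_load_bytes() writes into a stack
	 * buffer, so the verifier must see a valid stack region of the
	 * requested size.  Negative, zero and unbounded lengths are
	 * rejected, an uninitialized buffer is fine since the helper
	 * fills it, and spill slots overlapping the buffer are clobbered
	 * and may no longer be used as pointers afterwards.
	 */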
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002243 {
2244 "raw_stack: no skb_load_bytes",
2245 .insns = {
2246 BPF_MOV64_IMM(BPF_REG_2, 4),
2247 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2249 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2250 BPF_MOV64_IMM(BPF_REG_4, 8),
2251 /* Call to skb_load_bytes() omitted. */
2252 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2253 BPF_EXIT_INSN(),
2254 },
2255 .result = REJECT,
2256 .errstr = "invalid read from stack off -8+0 size 8",
2257 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2258 },
2259 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002260 "raw_stack: skb_load_bytes, negative len",
2261 .insns = {
2262 BPF_MOV64_IMM(BPF_REG_2, 4),
2263 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2265 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2266 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002267 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2268 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002269 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2270 BPF_EXIT_INSN(),
2271 },
2272 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002273 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2275 },
2276 {
2277 "raw_stack: skb_load_bytes, negative len 2",
2278 .insns = {
2279 BPF_MOV64_IMM(BPF_REG_2, 4),
2280 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2282 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2283 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002284 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2285 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002286 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2287 BPF_EXIT_INSN(),
2288 },
2289 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002290 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002291 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2292 },
2293 {
2294 "raw_stack: skb_load_bytes, zero len",
2295 .insns = {
2296 BPF_MOV64_IMM(BPF_REG_2, 4),
2297 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2299 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2300 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2302 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002303 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2304 BPF_EXIT_INSN(),
2305 },
2306 .result = REJECT,
2307 .errstr = "invalid stack type R3",
2308 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2309 },
2310 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002311 "raw_stack: skb_load_bytes, no init",
2312 .insns = {
2313 BPF_MOV64_IMM(BPF_REG_2, 4),
2314 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2316 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2317 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2319 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002320 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2321 BPF_EXIT_INSN(),
2322 },
2323 .result = ACCEPT,
2324 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2325 },
2326 {
2327 "raw_stack: skb_load_bytes, init",
2328 .insns = {
2329 BPF_MOV64_IMM(BPF_REG_2, 4),
2330 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2332 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2333 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2334 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2336 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002337 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2338 BPF_EXIT_INSN(),
2339 },
2340 .result = ACCEPT,
2341 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2342 },
2343 {
2344 "raw_stack: skb_load_bytes, spilled regs around bounds",
2345 .insns = {
2346 BPF_MOV64_IMM(BPF_REG_2, 4),
2347 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002349 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2350 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002351 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2352 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2354 BPF_FUNC_skb_load_bytes),
2355 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2356 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002357 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2358 offsetof(struct __sk_buff, mark)),
2359 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2360 offsetof(struct __sk_buff, priority)),
2361 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2362 BPF_EXIT_INSN(),
2363 },
2364 .result = ACCEPT,
2365 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2366 },
2367 {
2368 "raw_stack: skb_load_bytes, spilled regs corruption",
2369 .insns = {
2370 BPF_MOV64_IMM(BPF_REG_2, 4),
2371 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002373 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002374 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2375 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2377 BPF_FUNC_skb_load_bytes),
2378 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002379 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2380 offsetof(struct __sk_buff, mark)),
2381 BPF_EXIT_INSN(),
2382 },
2383 .result = REJECT,
2384 .errstr = "R0 invalid mem access 'inv'",
2385 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2386 },
2387 {
2388 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2389 .insns = {
2390 BPF_MOV64_IMM(BPF_REG_2, 4),
2391 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2392 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002393 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2394 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2395 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002396 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2397 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002398 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2399 BPF_FUNC_skb_load_bytes),
2400 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2401 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2402 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002403 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2404 offsetof(struct __sk_buff, mark)),
2405 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2406 offsetof(struct __sk_buff, priority)),
2407 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2408 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2409 offsetof(struct __sk_buff, pkt_type)),
2410 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2411 BPF_EXIT_INSN(),
2412 },
2413 .result = REJECT,
2414 .errstr = "R3 invalid mem access 'inv'",
2415 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2416 },
2417 {
2418 "raw_stack: skb_load_bytes, spilled regs + data",
2419 .insns = {
2420 BPF_MOV64_IMM(BPF_REG_2, 4),
2421 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2422 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002423 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2424 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2425 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002426 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2427 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002428 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2429 BPF_FUNC_skb_load_bytes),
2430 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2431 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2432 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002433 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2434 offsetof(struct __sk_buff, mark)),
2435 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2436 offsetof(struct __sk_buff, priority)),
2437 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2438 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2439 BPF_EXIT_INSN(),
2440 },
2441 .result = ACCEPT,
2442 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2443 },
2444 {
2445 "raw_stack: skb_load_bytes, invalid access 1",
2446 .insns = {
2447 BPF_MOV64_IMM(BPF_REG_2, 4),
2448 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2449 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2450 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2451 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002452 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2453 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002454 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2455 BPF_EXIT_INSN(),
2456 },
2457 .result = REJECT,
2458 .errstr = "invalid stack type R3 off=-513 access_size=8",
2459 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2460 },
2461 {
2462 "raw_stack: skb_load_bytes, invalid access 2",
2463 .insns = {
2464 BPF_MOV64_IMM(BPF_REG_2, 4),
2465 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2467 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2468 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2470 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002471 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2472 BPF_EXIT_INSN(),
2473 },
2474 .result = REJECT,
2475 .errstr = "invalid stack type R3 off=-1 access_size=8",
2476 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2477 },
2478 {
2479 "raw_stack: skb_load_bytes, invalid access 3",
2480 .insns = {
2481 BPF_MOV64_IMM(BPF_REG_2, 4),
2482 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2484 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2485 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2487 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002488 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2489 BPF_EXIT_INSN(),
2490 },
2491 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002492 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002493 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2494 },
2495 {
2496 "raw_stack: skb_load_bytes, invalid access 4",
2497 .insns = {
2498 BPF_MOV64_IMM(BPF_REG_2, 4),
2499 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2501 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2502 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2504 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002505 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2506 BPF_EXIT_INSN(),
2507 },
2508 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002509 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002510 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2511 },
2512 {
2513 "raw_stack: skb_load_bytes, invalid access 5",
2514 .insns = {
2515 BPF_MOV64_IMM(BPF_REG_2, 4),
2516 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2518 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2519 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002520 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2521 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2523 BPF_EXIT_INSN(),
2524 },
2525 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002526 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002527 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2528 },
2529 {
2530 "raw_stack: skb_load_bytes, invalid access 6",
2531 .insns = {
2532 BPF_MOV64_IMM(BPF_REG_2, 4),
2533 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2534 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2535 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2536 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2538 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002539 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2540 BPF_EXIT_INSN(),
2541 },
2542 .result = REJECT,
2543 .errstr = "invalid stack type R3 off=-512 access_size=0",
2544 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2545 },
2546 {
2547 "raw_stack: skb_load_bytes, large access",
2548 .insns = {
2549 BPF_MOV64_IMM(BPF_REG_2, 4),
2550 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2552 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2553 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002554 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2555 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002556 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2557 BPF_EXIT_INSN(),
2558 },
2559 .result = ACCEPT,
2560 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2561 },
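	/*
	 * Direct packet access: programs load skb->data and skb->data_end
	 * and must prove, via a comparison against data_end, that every
	 * packet load or store stays in bounds.  The tests cover good and
	 * bad bounds checks, reads vs. writes, arithmetic on packet
	 * pointers and on data_end itself, and how ranges survive spills,
	 * shifts and masking.
	 */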
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002562 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002563 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002564 .insns = {
2565 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2566 offsetof(struct __sk_buff, data)),
2567 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2568 offsetof(struct __sk_buff, data_end)),
2569 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2570 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2571 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2572 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2573 BPF_MOV64_IMM(BPF_REG_0, 0),
2574 BPF_EXIT_INSN(),
2575 },
2576 .result = ACCEPT,
2577 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2578 },
2579 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002580 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002581 .insns = {
2582 BPF_MOV64_IMM(BPF_REG_0, 1),
2583 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2584 offsetof(struct __sk_buff, data_end)),
2585 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2586 offsetof(struct __sk_buff, data)),
2587 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2589 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2590 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2591 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2592 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2593 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2594 offsetof(struct __sk_buff, data)),
2595 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
Edward Cree1f9ab382017-08-07 15:29:11 +01002597 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2598 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002599 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2600 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2601 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2602 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2603 offsetof(struct __sk_buff, data_end)),
2604 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2605 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2606 BPF_MOV64_IMM(BPF_REG_0, 0),
2607 BPF_EXIT_INSN(),
2608 },
2609 .result = ACCEPT,
2610 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2611 },
2612 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002613 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002614 .insns = {
2615 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2616 offsetof(struct __sk_buff, data)),
2617 BPF_MOV64_IMM(BPF_REG_0, 0),
2618 BPF_EXIT_INSN(),
2619 },
2620 .errstr = "invalid bpf_context access off=76",
2621 .result = REJECT,
2622 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2623 },
2624 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002625 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002626 .insns = {
2627 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2628 offsetof(struct __sk_buff, data)),
2629 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2630 offsetof(struct __sk_buff, data_end)),
2631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2633 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2634 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2635 BPF_MOV64_IMM(BPF_REG_0, 0),
2636 BPF_EXIT_INSN(),
2637 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002638 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2640 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002641 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002642 "direct packet access: test5 (pkt_end >= reg, good access)",
2643 .insns = {
2644 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2645 offsetof(struct __sk_buff, data)),
2646 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2647 offsetof(struct __sk_buff, data_end)),
2648 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2650 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2651 BPF_MOV64_IMM(BPF_REG_0, 1),
2652 BPF_EXIT_INSN(),
2653 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2654 BPF_MOV64_IMM(BPF_REG_0, 0),
2655 BPF_EXIT_INSN(),
2656 },
2657 .result = ACCEPT,
2658 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2659 },
2660 {
2661 "direct packet access: test6 (pkt_end >= reg, bad access)",
2662 .insns = {
2663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2664 offsetof(struct __sk_buff, data)),
2665 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2666 offsetof(struct __sk_buff, data_end)),
2667 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2669 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2670 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2671 BPF_MOV64_IMM(BPF_REG_0, 1),
2672 BPF_EXIT_INSN(),
2673 BPF_MOV64_IMM(BPF_REG_0, 0),
2674 BPF_EXIT_INSN(),
2675 },
2676 .errstr = "invalid access to packet",
2677 .result = REJECT,
2678 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2679 },
2680 {
2681 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2682 .insns = {
2683 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2684 offsetof(struct __sk_buff, data)),
2685 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2686 offsetof(struct __sk_buff, data_end)),
2687 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2689 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2690 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2691 BPF_MOV64_IMM(BPF_REG_0, 1),
2692 BPF_EXIT_INSN(),
2693 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2694 BPF_MOV64_IMM(BPF_REG_0, 0),
2695 BPF_EXIT_INSN(),
2696 },
2697 .errstr = "invalid access to packet",
2698 .result = REJECT,
2699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2700 },
2701 {
2702 "direct packet access: test8 (double test, variant 1)",
2703 .insns = {
2704 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2705 offsetof(struct __sk_buff, data)),
2706 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2707 offsetof(struct __sk_buff, data_end)),
2708 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2710 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2711 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2712 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2713 BPF_MOV64_IMM(BPF_REG_0, 1),
2714 BPF_EXIT_INSN(),
2715 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2716 BPF_MOV64_IMM(BPF_REG_0, 0),
2717 BPF_EXIT_INSN(),
2718 },
2719 .result = ACCEPT,
2720 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2721 },
2722 {
2723 "direct packet access: test9 (double test, variant 2)",
2724 .insns = {
2725 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2726 offsetof(struct __sk_buff, data)),
2727 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2728 offsetof(struct __sk_buff, data_end)),
2729 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2731 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2732 BPF_MOV64_IMM(BPF_REG_0, 1),
2733 BPF_EXIT_INSN(),
2734 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2735 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2736 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2737 BPF_MOV64_IMM(BPF_REG_0, 0),
2738 BPF_EXIT_INSN(),
2739 },
2740 .result = ACCEPT,
2741 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2742 },
2743 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002744 "direct packet access: test10 (write invalid)",
2745 .insns = {
2746 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2747 offsetof(struct __sk_buff, data)),
2748 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2749 offsetof(struct __sk_buff, data_end)),
2750 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2752 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2753 BPF_MOV64_IMM(BPF_REG_0, 0),
2754 BPF_EXIT_INSN(),
2755 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2756 BPF_MOV64_IMM(BPF_REG_0, 0),
2757 BPF_EXIT_INSN(),
2758 },
2759 .errstr = "invalid access to packet",
2760 .result = REJECT,
2761 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2762 },
2763 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002764 "direct packet access: test11 (shift, good access)",
2765 .insns = {
2766 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2767 offsetof(struct __sk_buff, data)),
2768 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2769 offsetof(struct __sk_buff, data_end)),
2770 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2772 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2773 BPF_MOV64_IMM(BPF_REG_3, 144),
2774 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2775 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2776 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2777 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2778 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2779 BPF_MOV64_IMM(BPF_REG_0, 1),
2780 BPF_EXIT_INSN(),
2781 BPF_MOV64_IMM(BPF_REG_0, 0),
2782 BPF_EXIT_INSN(),
2783 },
2784 .result = ACCEPT,
2785 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2786 },
2787 {
2788 "direct packet access: test12 (and, good access)",
2789 .insns = {
2790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2791 offsetof(struct __sk_buff, data)),
2792 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2793 offsetof(struct __sk_buff, data_end)),
2794 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2796 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2797 BPF_MOV64_IMM(BPF_REG_3, 144),
2798 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2799 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2800 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2801 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2802 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2803 BPF_MOV64_IMM(BPF_REG_0, 1),
2804 BPF_EXIT_INSN(),
2805 BPF_MOV64_IMM(BPF_REG_0, 0),
2806 BPF_EXIT_INSN(),
2807 },
2808 .result = ACCEPT,
2809 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2810 },
2811 {
2812 "direct packet access: test13 (branches, good access)",
2813 .insns = {
2814 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2815 offsetof(struct __sk_buff, data)),
2816 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2817 offsetof(struct __sk_buff, data_end)),
2818 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2820 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2821 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2822 offsetof(struct __sk_buff, mark)),
2823 BPF_MOV64_IMM(BPF_REG_4, 1),
2824 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2825 BPF_MOV64_IMM(BPF_REG_3, 14),
2826 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2827 BPF_MOV64_IMM(BPF_REG_3, 24),
2828 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2829 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2830 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2831 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2832 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2833 BPF_MOV64_IMM(BPF_REG_0, 1),
2834 BPF_EXIT_INSN(),
2835 BPF_MOV64_IMM(BPF_REG_0, 0),
2836 BPF_EXIT_INSN(),
2837 },
2838 .result = ACCEPT,
2839 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2840 },
2841 {
William Tu63dfef72017-02-04 08:37:29 -08002842 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2843 .insns = {
2844 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2845 offsetof(struct __sk_buff, data)),
2846 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2847 offsetof(struct __sk_buff, data_end)),
2848 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2850 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2851 BPF_MOV64_IMM(BPF_REG_5, 12),
2852 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2853 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2854 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2855 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2856 BPF_MOV64_IMM(BPF_REG_0, 1),
2857 BPF_EXIT_INSN(),
2858 BPF_MOV64_IMM(BPF_REG_0, 0),
2859 BPF_EXIT_INSN(),
2860 },
2861 .result = ACCEPT,
2862 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2863 },
2864 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002865 "direct packet access: test15 (spill with xadd)",
2866 .insns = {
2867 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2868 offsetof(struct __sk_buff, data)),
2869 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2870 offsetof(struct __sk_buff, data_end)),
2871 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2873 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2874 BPF_MOV64_IMM(BPF_REG_5, 4096),
2875 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2876 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2877 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2878 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2879 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2880 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2881 BPF_MOV64_IMM(BPF_REG_0, 0),
2882 BPF_EXIT_INSN(),
2883 },
2884 .errstr = "R2 invalid mem access 'inv'",
2885 .result = REJECT,
2886 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2887 },
2888 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002889 "direct packet access: test16 (arith on data_end)",
2890 .insns = {
2891 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2892 offsetof(struct __sk_buff, data)),
2893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2894 offsetof(struct __sk_buff, data_end)),
2895 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2898 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2899 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2900 BPF_MOV64_IMM(BPF_REG_0, 0),
2901 BPF_EXIT_INSN(),
2902 },
2903 .errstr = "invalid access to packet",
2904 .result = REJECT,
2905 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2906 },
2907 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002908 "direct packet access: test17 (pruning, alignment)",
2909 .insns = {
2910 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2911 offsetof(struct __sk_buff, data)),
2912 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2913 offsetof(struct __sk_buff, data_end)),
2914 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2915 offsetof(struct __sk_buff, mark)),
2916 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2918 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2919 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2920 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2921 BPF_MOV64_IMM(BPF_REG_0, 0),
2922 BPF_EXIT_INSN(),
2923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2924 BPF_JMP_A(-6),
2925 },
Edward Creef65b1842017-08-07 15:27:12 +01002926 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002927 .result = REJECT,
2928 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2929 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2930 },
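	/*
	 * test18-test24 add immediates or masked/shifted scalars to
	 * packet pointers in varying orders; the access is accepted only
	 * when the verifier can still bound the resulting pointer
	 * against data_end before the dereference.
	 */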
2931 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002932 "direct packet access: test18 (imm += pkt_ptr, 1)",
2933 .insns = {
2934 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2935 offsetof(struct __sk_buff, data)),
2936 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2937 offsetof(struct __sk_buff, data_end)),
2938 BPF_MOV64_IMM(BPF_REG_0, 8),
2939 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2940 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2941 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2942 BPF_MOV64_IMM(BPF_REG_0, 0),
2943 BPF_EXIT_INSN(),
2944 },
2945 .result = ACCEPT,
2946 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2947 },
2948 {
2949 "direct packet access: test19 (imm += pkt_ptr, 2)",
2950 .insns = {
2951 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2952 offsetof(struct __sk_buff, data)),
2953 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2954 offsetof(struct __sk_buff, data_end)),
2955 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2957 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2958 BPF_MOV64_IMM(BPF_REG_4, 4),
2959 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2960 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2961 BPF_MOV64_IMM(BPF_REG_0, 0),
2962 BPF_EXIT_INSN(),
2963 },
2964 .result = ACCEPT,
2965 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2966 },
2967 {
2968 "direct packet access: test20 (x += pkt_ptr, 1)",
2969 .insns = {
2970 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2971 offsetof(struct __sk_buff, data)),
2972 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2973 offsetof(struct __sk_buff, data_end)),
2974 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2975 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2976 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01002977 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002978 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2979 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2980 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01002981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002982 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2983 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2984 BPF_MOV64_IMM(BPF_REG_0, 0),
2985 BPF_EXIT_INSN(),
2986 },
2987 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2988 .result = ACCEPT,
2989 },
2990 {
2991 "direct packet access: test21 (x += pkt_ptr, 2)",
2992 .insns = {
2993 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2994 offsetof(struct __sk_buff, data)),
2995 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2996 offsetof(struct __sk_buff, data_end)),
2997 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2998 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2999 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3000 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3001 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3002 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003003 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003004 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3005 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01003006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003007 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3008 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3009 BPF_MOV64_IMM(BPF_REG_0, 0),
3010 BPF_EXIT_INSN(),
3011 },
3012 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3013 .result = ACCEPT,
3014 },
3015 {
3016 "direct packet access: test22 (x += pkt_ptr, 3)",
3017 .insns = {
3018 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3019 offsetof(struct __sk_buff, data)),
3020 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3021 offsetof(struct __sk_buff, data_end)),
3022 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3024 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3025 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3026 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3027 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3028 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3029 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3030 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3031 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01003032 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003033 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3034 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3036 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3037 BPF_MOV64_IMM(BPF_REG_2, 1),
3038 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3039 BPF_MOV64_IMM(BPF_REG_0, 0),
3040 BPF_EXIT_INSN(),
3041 },
3042 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3043 .result = ACCEPT,
3044 },
3045 {
3046 "direct packet access: test23 (x += pkt_ptr, 4)",
3047 .insns = {
3048 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3049 offsetof(struct __sk_buff, data)),
3050 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3051 offsetof(struct __sk_buff, data_end)),
3052 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3053 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3055 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3056 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3057 BPF_MOV64_IMM(BPF_REG_0, 31),
3058 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3059 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3060 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3062 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3063 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3064 BPF_MOV64_IMM(BPF_REG_0, 0),
3065 BPF_EXIT_INSN(),
3066 },
3067 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3068 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003069 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003070 },
3071 {
3072 "direct packet access: test24 (x += pkt_ptr, 5)",
3073 .insns = {
3074 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3075 offsetof(struct __sk_buff, data)),
3076 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3077 offsetof(struct __sk_buff, data_end)),
3078 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3079 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3080 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3081 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3082 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3083 BPF_MOV64_IMM(BPF_REG_0, 64),
3084 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3085 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3086 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01003087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02003088 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3089 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3090 BPF_MOV64_IMM(BPF_REG_0, 0),
3091 BPF_EXIT_INSN(),
3092 },
3093 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3094 .result = ACCEPT,
3095 },
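	/*
	 * test25-test28 use the reversed comparisons BPF_JLT/BPF_JLE
	 * against data_end and verify that the safe packet range is only
	 * marked on the correct branch: the access on the in-range path is
	 * accepted, the access on the other path is rejected.
	 */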
3096 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02003097 "direct packet access: test25 (marking on <, good access)",
3098 .insns = {
3099 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3100 offsetof(struct __sk_buff, data)),
3101 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3102 offsetof(struct __sk_buff, data_end)),
3103 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3105 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3106 BPF_MOV64_IMM(BPF_REG_0, 0),
3107 BPF_EXIT_INSN(),
3108 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3109 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3110 },
3111 .result = ACCEPT,
3112 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3113 },
3114 {
3115 "direct packet access: test26 (marking on <, bad access)",
3116 .insns = {
3117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3118 offsetof(struct __sk_buff, data)),
3119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3120 offsetof(struct __sk_buff, data_end)),
3121 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3123 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3124 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3125 BPF_MOV64_IMM(BPF_REG_0, 0),
3126 BPF_EXIT_INSN(),
3127 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3128 },
3129 .result = REJECT,
3130 .errstr = "invalid access to packet",
3131 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3132 },
3133 {
3134 "direct packet access: test27 (marking on <=, good access)",
3135 .insns = {
3136 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3137 offsetof(struct __sk_buff, data)),
3138 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3139 offsetof(struct __sk_buff, data_end)),
3140 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3142 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3143 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3144 BPF_MOV64_IMM(BPF_REG_0, 1),
3145 BPF_EXIT_INSN(),
3146 },
3147 .result = ACCEPT,
3148 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3149 },
3150 {
3151 "direct packet access: test28 (marking on <=, bad access)",
3152 .insns = {
3153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3154 offsetof(struct __sk_buff, data)),
3155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3156 offsetof(struct __sk_buff, data_end)),
3157 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3159 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3160 BPF_MOV64_IMM(BPF_REG_0, 1),
3161 BPF_EXIT_INSN(),
3162 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3163 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3164 },
3165 .result = REJECT,
3166 .errstr = "invalid access to packet",
3167 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3168 },
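	/*
	 * The "helper access to packet" tests pass packet pointers taken
	 * from struct xdp_md (test1-test5) as map helper arguments.  The
	 * call is only accepted when the pointer has been bounds-checked
	 * against data_end for the range the helper may touch; unchecked
	 * pointers and too-short ranges are rejected.
	 */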
3169 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003170 "helper access to packet: test1, valid packet_ptr range",
3171 .insns = {
3172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3173 offsetof(struct xdp_md, data)),
3174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3175 offsetof(struct xdp_md, data_end)),
3176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3178 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3179 BPF_LD_MAP_FD(BPF_REG_1, 0),
3180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3181 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003184 BPF_MOV64_IMM(BPF_REG_0, 0),
3185 BPF_EXIT_INSN(),
3186 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003187 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003188 .result_unpriv = ACCEPT,
3189 .result = ACCEPT,
3190 .prog_type = BPF_PROG_TYPE_XDP,
3191 },
3192 {
3193 "helper access to packet: test2, unchecked packet_ptr",
3194 .insns = {
3195 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3196 offsetof(struct xdp_md, data)),
3197 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003198 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3199 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003200 BPF_MOV64_IMM(BPF_REG_0, 0),
3201 BPF_EXIT_INSN(),
3202 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003203 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003204 .result = REJECT,
3205 .errstr = "invalid access to packet",
3206 .prog_type = BPF_PROG_TYPE_XDP,
3207 },
3208 {
3209 "helper access to packet: test3, variable add",
3210 .insns = {
3211 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3212 offsetof(struct xdp_md, data)),
3213 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3214 offsetof(struct xdp_md, data_end)),
3215 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3217 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3218 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3219 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3220 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3221 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3223 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3224 BPF_LD_MAP_FD(BPF_REG_1, 0),
3225 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003226 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3227 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003228 BPF_MOV64_IMM(BPF_REG_0, 0),
3229 BPF_EXIT_INSN(),
3230 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003231 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003232 .result = ACCEPT,
3233 .prog_type = BPF_PROG_TYPE_XDP,
3234 },
3235 {
3236 "helper access to packet: test4, packet_ptr with bad range",
3237 .insns = {
3238 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3239 offsetof(struct xdp_md, data)),
3240 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3241 offsetof(struct xdp_md, data_end)),
3242 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3244 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3245 BPF_MOV64_IMM(BPF_REG_0, 0),
3246 BPF_EXIT_INSN(),
3247 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003248 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3249 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003250 BPF_MOV64_IMM(BPF_REG_0, 0),
3251 BPF_EXIT_INSN(),
3252 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003253 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003254 .result = REJECT,
3255 .errstr = "invalid access to packet",
3256 .prog_type = BPF_PROG_TYPE_XDP,
3257 },
3258 {
3259 "helper access to packet: test5, packet_ptr with too short range",
3260 .insns = {
3261 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3262 offsetof(struct xdp_md, data)),
3263 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3264 offsetof(struct xdp_md, data_end)),
3265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3266 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3268 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3269 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3271 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003272 BPF_MOV64_IMM(BPF_REG_0, 0),
3273 BPF_EXIT_INSN(),
3274 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003275 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003276 .result = REJECT,
3277 .errstr = "invalid access to packet",
3278 .prog_type = BPF_PROG_TYPE_XDP,
3279 },
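	/*
	 * test6 onwards repeat the packet-pointer-as-helper-argument
	 * checks for BPF_PROG_TYPE_SCHED_CLS (struct __sk_buff) and add
	 * helpers for which a packet pointer is an unsuitable argument
	 * (skb_store_bytes, skb_load_bytes) as well as csum_diff, which
	 * accepts one when the range is valid.
	 */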
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003280 {
3281 "helper access to packet: test6, cls valid packet_ptr range",
3282 .insns = {
3283 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3284 offsetof(struct __sk_buff, data)),
3285 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3286 offsetof(struct __sk_buff, data_end)),
3287 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3289 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3290 BPF_LD_MAP_FD(BPF_REG_1, 0),
3291 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3292 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3294 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003295 BPF_MOV64_IMM(BPF_REG_0, 0),
3296 BPF_EXIT_INSN(),
3297 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003298 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003299 .result = ACCEPT,
3300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3301 },
3302 {
3303 "helper access to packet: test7, cls unchecked packet_ptr",
3304 .insns = {
3305 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3306 offsetof(struct __sk_buff, data)),
3307 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003308 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3309 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003310 BPF_MOV64_IMM(BPF_REG_0, 0),
3311 BPF_EXIT_INSN(),
3312 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003313 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003314 .result = REJECT,
3315 .errstr = "invalid access to packet",
3316 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3317 },
3318 {
3319 "helper access to packet: test8, cls variable add",
3320 .insns = {
3321 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3322 offsetof(struct __sk_buff, data)),
3323 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3324 offsetof(struct __sk_buff, data_end)),
3325 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3327 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3328 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3329 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3330 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3331 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3334 BPF_LD_MAP_FD(BPF_REG_1, 0),
3335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3337 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003338 BPF_MOV64_IMM(BPF_REG_0, 0),
3339 BPF_EXIT_INSN(),
3340 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003341 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003342 .result = ACCEPT,
3343 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3344 },
3345 {
3346 "helper access to packet: test9, cls packet_ptr with bad range",
3347 .insns = {
3348 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3349 offsetof(struct __sk_buff, data)),
3350 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3351 offsetof(struct __sk_buff, data_end)),
3352 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3354 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3355 BPF_MOV64_IMM(BPF_REG_0, 0),
3356 BPF_EXIT_INSN(),
3357 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3359 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003360 BPF_MOV64_IMM(BPF_REG_0, 0),
3361 BPF_EXIT_INSN(),
3362 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003363 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003364 .result = REJECT,
3365 .errstr = "invalid access to packet",
3366 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3367 },
3368 {
3369 "helper access to packet: test10, cls packet_ptr with too short range",
3370 .insns = {
3371 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3372 offsetof(struct __sk_buff, data)),
3373 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3374 offsetof(struct __sk_buff, data_end)),
3375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3376 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3378 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3379 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003380 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3381 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003382 BPF_MOV64_IMM(BPF_REG_0, 0),
3383 BPF_EXIT_INSN(),
3384 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003385 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003386 .result = REJECT,
3387 .errstr = "invalid access to packet",
3388 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3389 },
3390 {
3391 "helper access to packet: test11, cls unsuitable helper 1",
3392 .insns = {
3393 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3394 offsetof(struct __sk_buff, data)),
3395 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3396 offsetof(struct __sk_buff, data_end)),
3397 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3398 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3400 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3401 BPF_MOV64_IMM(BPF_REG_2, 0),
3402 BPF_MOV64_IMM(BPF_REG_4, 42),
3403 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3405 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003406 BPF_MOV64_IMM(BPF_REG_0, 0),
3407 BPF_EXIT_INSN(),
3408 },
3409 .result = REJECT,
3410 .errstr = "helper access to the packet",
3411 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3412 },
3413 {
3414 "helper access to packet: test12, cls unsuitable helper 2",
3415 .insns = {
3416 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3417 offsetof(struct __sk_buff, data)),
3418 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3419 offsetof(struct __sk_buff, data_end)),
3420 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3422 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3423 BPF_MOV64_IMM(BPF_REG_2, 0),
3424 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003425 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3426 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003427 BPF_MOV64_IMM(BPF_REG_0, 0),
3428 BPF_EXIT_INSN(),
3429 },
3430 .result = REJECT,
3431 .errstr = "helper access to the packet",
3432 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3433 },
3434 {
3435 "helper access to packet: test13, cls helper ok",
3436 .insns = {
3437 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3438 offsetof(struct __sk_buff, data)),
3439 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3440 offsetof(struct __sk_buff, data_end)),
3441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3442 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3444 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3445 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3446 BPF_MOV64_IMM(BPF_REG_2, 4),
3447 BPF_MOV64_IMM(BPF_REG_3, 0),
3448 BPF_MOV64_IMM(BPF_REG_4, 0),
3449 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003450 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3451 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003452 BPF_MOV64_IMM(BPF_REG_0, 0),
3453 BPF_EXIT_INSN(),
3454 },
3455 .result = ACCEPT,
3456 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3457 },
3458 {
Edward Creef65b1842017-08-07 15:27:12 +01003459 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003460 .insns = {
3461 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3462 offsetof(struct __sk_buff, data)),
3463 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3464 offsetof(struct __sk_buff, data_end)),
3465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3468 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3469 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3470 BPF_MOV64_IMM(BPF_REG_2, 4),
3471 BPF_MOV64_IMM(BPF_REG_3, 0),
3472 BPF_MOV64_IMM(BPF_REG_4, 0),
3473 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003474 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3475 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003476 BPF_MOV64_IMM(BPF_REG_0, 0),
3477 BPF_EXIT_INSN(),
3478 },
Edward Creef65b1842017-08-07 15:27:12 +01003479 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003480 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3481 },
3482 {
Edward Creef65b1842017-08-07 15:27:12 +01003483 "helper access to packet: test15, cls helper fail sub",
3484 .insns = {
3485 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3486 offsetof(struct __sk_buff, data)),
3487 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3488 offsetof(struct __sk_buff, data_end)),
3489 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3492 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3493 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3494 BPF_MOV64_IMM(BPF_REG_2, 4),
3495 BPF_MOV64_IMM(BPF_REG_3, 0),
3496 BPF_MOV64_IMM(BPF_REG_4, 0),
3497 BPF_MOV64_IMM(BPF_REG_5, 0),
3498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3499 BPF_FUNC_csum_diff),
3500 BPF_MOV64_IMM(BPF_REG_0, 0),
3501 BPF_EXIT_INSN(),
3502 },
3503 .result = REJECT,
3504 .errstr = "invalid access to packet",
3505 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3506 },
3507 {
3508 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003509 .insns = {
3510 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3511 offsetof(struct __sk_buff, data)),
3512 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3513 offsetof(struct __sk_buff, data_end)),
3514 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3515 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3517 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3518 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3519 BPF_MOV64_IMM(BPF_REG_2, 8),
3520 BPF_MOV64_IMM(BPF_REG_3, 0),
3521 BPF_MOV64_IMM(BPF_REG_4, 0),
3522 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3524 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003525 BPF_MOV64_IMM(BPF_REG_0, 0),
3526 BPF_EXIT_INSN(),
3527 },
3528 .result = REJECT,
3529 .errstr = "invalid access to packet",
3530 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3531 },
3532 {
Edward Creef65b1842017-08-07 15:27:12 +01003533 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003534 .insns = {
3535 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3536 offsetof(struct __sk_buff, data)),
3537 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3538 offsetof(struct __sk_buff, data_end)),
3539 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3540 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3542 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3543 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3544 BPF_MOV64_IMM(BPF_REG_2, -9),
3545 BPF_MOV64_IMM(BPF_REG_3, 0),
3546 BPF_MOV64_IMM(BPF_REG_4, 0),
3547 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003548 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3549 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003550 BPF_MOV64_IMM(BPF_REG_0, 0),
3551 BPF_EXIT_INSN(),
3552 },
3553 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003554 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003555 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3556 },
3557 {
Edward Creef65b1842017-08-07 15:27:12 +01003558 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003559 .insns = {
3560 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3561 offsetof(struct __sk_buff, data)),
3562 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3563 offsetof(struct __sk_buff, data_end)),
3564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3565 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3567 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3568 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3569 BPF_MOV64_IMM(BPF_REG_2, ~0),
3570 BPF_MOV64_IMM(BPF_REG_3, 0),
3571 BPF_MOV64_IMM(BPF_REG_4, 0),
3572 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003573 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3574 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003575 BPF_MOV64_IMM(BPF_REG_0, 0),
3576 BPF_EXIT_INSN(),
3577 },
3578 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003579 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003580 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3581 },
3582 {
Yonghong Songb6ff6392017-11-12 14:49:11 -08003583 "helper access to packet: test19, cls helper range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003584 .insns = {
3585 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3586 offsetof(struct __sk_buff, data)),
3587 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3588 offsetof(struct __sk_buff, data_end)),
3589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3590 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3592 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3593 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3594 BPF_MOV64_IMM(BPF_REG_2, 0),
3595 BPF_MOV64_IMM(BPF_REG_3, 0),
3596 BPF_MOV64_IMM(BPF_REG_4, 0),
3597 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003598 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3599 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003600 BPF_MOV64_IMM(BPF_REG_0, 0),
3601 BPF_EXIT_INSN(),
3602 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08003603 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003604 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3605 },
3606 {
Edward Creef65b1842017-08-07 15:27:12 +01003607 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003608 .insns = {
3609 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3610 offsetof(struct __sk_buff, data)),
3611 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3612 offsetof(struct __sk_buff, data_end)),
3613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3616 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3617 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3618 BPF_MOV64_IMM(BPF_REG_2, 4),
3619 BPF_MOV64_IMM(BPF_REG_3, 0),
3620 BPF_MOV64_IMM(BPF_REG_4, 0),
3621 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003622 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3623 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003624 BPF_MOV64_IMM(BPF_REG_0, 0),
3625 BPF_EXIT_INSN(),
3626 },
3627 .result = REJECT,
3628 .errstr = "R1 type=pkt_end expected=fp",
3629 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3630 },
3631 {
Edward Creef65b1842017-08-07 15:27:12 +01003632 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003633 .insns = {
3634 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3635 offsetof(struct __sk_buff, data)),
3636 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3637 offsetof(struct __sk_buff, data_end)),
3638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3639 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3641 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3642 BPF_MOV64_IMM(BPF_REG_2, 4),
3643 BPF_MOV64_IMM(BPF_REG_3, 0),
3644 BPF_MOV64_IMM(BPF_REG_4, 0),
3645 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3647 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003648 BPF_MOV64_IMM(BPF_REG_0, 0),
3649 BPF_EXIT_INSN(),
3650 },
3651 .result = REJECT,
3652 .errstr = "invalid access to packet",
3653 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3654 },
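	/*
	 * The following tests exercise bounds checking of accesses into an
	 * array map value (value_size=48): constant offsets, register and
	 * variable offsets, and variable offsets lacking a floor or upper
	 * bound check.  Out-of-range and unbounded offsets are rejected,
	 * and unprivileged programs additionally may not leak the map
	 * value address.
	 */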
Josef Bacik48461132016-09-28 10:54:32 -04003655 {
3656 "valid map access into an array with a constant",
3657 .insns = {
3658 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3659 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3661 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003662 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3663 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003665 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3666 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003667 BPF_EXIT_INSN(),
3668 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003669 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003670 .errstr_unpriv = "R0 leaks addr",
3671 .result_unpriv = REJECT,
3672 .result = ACCEPT,
3673 },
3674 {
3675 "valid map access into an array with a register",
3676 .insns = {
3677 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3678 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3679 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3680 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003681 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3682 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3684 BPF_MOV64_IMM(BPF_REG_1, 4),
3685 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3686 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003687 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3688 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003689 BPF_EXIT_INSN(),
3690 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003691 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003692 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003693 .result_unpriv = REJECT,
3694 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003695 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003696 },
3697 {
3698 "valid map access into an array with a variable",
3699 .insns = {
3700 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3701 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3703 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3705 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003706 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3707 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3708 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3709 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3710 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003711 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3712 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003713 BPF_EXIT_INSN(),
3714 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003715 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003716 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003717 .result_unpriv = REJECT,
3718 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003719 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003720 },
3721 {
3722 "valid map access into an array with a signed variable",
3723 .insns = {
3724 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3727 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003728 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3729 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003730 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3731 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3732 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3733 BPF_MOV32_IMM(BPF_REG_1, 0),
3734 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3735 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3736 BPF_MOV32_IMM(BPF_REG_1, 0),
3737 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3738 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003739 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3740 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003741 BPF_EXIT_INSN(),
3742 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003743 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003744 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003745 .result_unpriv = REJECT,
3746 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003747 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003748 },
3749 {
3750 "invalid map access into an array with a constant",
3751 .insns = {
3752 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3753 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3755 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003756 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3757 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003758 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3759 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3760 offsetof(struct test_val, foo)),
3761 BPF_EXIT_INSN(),
3762 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003763 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003764 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3765 .result = REJECT,
3766 },
3767 {
3768 "invalid map access into an array with a register",
3769 .insns = {
3770 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3771 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3772 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3773 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003774 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3775 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3777 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3778 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3779 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003780 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3781 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003782 BPF_EXIT_INSN(),
3783 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003784 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003785 .errstr = "R0 min value is outside of the array range",
3786 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003787 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003788 },
3789 {
3790 "invalid map access into an array with a variable",
3791 .insns = {
3792 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3793 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3795 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003796 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3797 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3799 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3800 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3801 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003802 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3803 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003804 BPF_EXIT_INSN(),
3805 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003806 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003807 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04003808 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003809 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003810 },
3811 {
3812 "invalid map access into an array with no floor check",
3813 .insns = {
3814 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3815 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3817 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3819 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003820 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01003821 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04003822 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3823 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3824 BPF_MOV32_IMM(BPF_REG_1, 0),
3825 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3826 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003827 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3828 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003829 BPF_EXIT_INSN(),
3830 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003831 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003832 .errstr_unpriv = "R0 leaks addr",
3833 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003834 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003835 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003836 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003837 },
3838 {
3839 	"invalid map access into an array with an invalid max check",
3840 .insns = {
3841 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3844 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3846 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003847 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3848 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3849 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3850 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3851 BPF_MOV32_IMM(BPF_REG_1, 0),
3852 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3853 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003854 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3855 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003856 BPF_EXIT_INSN(),
3857 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003858 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003859 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003860 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003861 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003862 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003863 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003864 },
3865 {
3866 	"invalid map access into an array with an invalid max check",
3867 .insns = {
3868 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3869 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3871 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003872 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3873 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003874 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3875 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3876 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3877 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3879 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003880 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3881 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003882 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3883 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003884 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3885 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003886 BPF_EXIT_INSN(),
3887 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003888 .fixup_map2 = { 3, 11 },
Edward Creef65b1842017-08-07 15:27:12 +01003889 .errstr_unpriv = "R0 pointer += pointer",
3890 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003891 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003892 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003893 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003894 },
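	/*
	 * bpf_map_lookup_elem() returns a map value pointer that may be
	 * NULL.  The tests below check that a NULL test on one register
	 * also validates copies of the same lookup result, while doing ALU
	 * arithmetic (add/and/lsh) on the not-yet-checked value, or keeping
	 * the result of an earlier lookup that was never NULL-checked,
	 * leads to the access being rejected.
	 */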
Thomas Graf57a09bf2016-10-18 19:51:19 +02003895 {
3896 "multiple registers share map_lookup_elem result",
3897 .insns = {
3898 BPF_MOV64_IMM(BPF_REG_1, 10),
3899 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3900 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3901 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3902 BPF_LD_MAP_FD(BPF_REG_1, 0),
3903 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3904 BPF_FUNC_map_lookup_elem),
3905 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3907 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3908 BPF_EXIT_INSN(),
3909 },
3910 .fixup_map1 = { 4 },
3911 .result = ACCEPT,
3912 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3913 },
3914 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003915 "alu ops on ptr_to_map_value_or_null, 1",
3916 .insns = {
3917 BPF_MOV64_IMM(BPF_REG_1, 10),
3918 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3921 BPF_LD_MAP_FD(BPF_REG_1, 0),
3922 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3923 BPF_FUNC_map_lookup_elem),
3924 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3925 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3927 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3928 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3929 BPF_EXIT_INSN(),
3930 },
3931 .fixup_map1 = { 4 },
3932 .errstr = "R4 invalid mem access",
3933 .result = REJECT,
3934 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3935 },
3936 {
3937 "alu ops on ptr_to_map_value_or_null, 2",
3938 .insns = {
3939 BPF_MOV64_IMM(BPF_REG_1, 10),
3940 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3941 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3943 BPF_LD_MAP_FD(BPF_REG_1, 0),
3944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3945 BPF_FUNC_map_lookup_elem),
3946 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3947 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3949 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3950 BPF_EXIT_INSN(),
3951 },
3952 .fixup_map1 = { 4 },
3953 .errstr = "R4 invalid mem access",
3954 .result = REJECT,
3955 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3956 },
3957 {
3958 "alu ops on ptr_to_map_value_or_null, 3",
3959 .insns = {
3960 BPF_MOV64_IMM(BPF_REG_1, 10),
3961 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3964 BPF_LD_MAP_FD(BPF_REG_1, 0),
3965 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3966 BPF_FUNC_map_lookup_elem),
3967 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3968 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3969 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3970 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3971 BPF_EXIT_INSN(),
3972 },
3973 .fixup_map1 = { 4 },
3974 .errstr = "R4 invalid mem access",
3975 .result = REJECT,
3976 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3977 },
3978 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02003979 "invalid memory access with multiple map_lookup_elem calls",
3980 .insns = {
3981 BPF_MOV64_IMM(BPF_REG_1, 10),
3982 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3983 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3985 BPF_LD_MAP_FD(BPF_REG_1, 0),
3986 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3987 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3988 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3989 BPF_FUNC_map_lookup_elem),
3990 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3991 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3994 BPF_FUNC_map_lookup_elem),
3995 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3996 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3997 BPF_EXIT_INSN(),
3998 },
3999 .fixup_map1 = { 4 },
4000 .result = REJECT,
4001 .errstr = "R4 !read_ok",
4002 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4003 },
4004 {
4005 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4006 .insns = {
4007 BPF_MOV64_IMM(BPF_REG_1, 10),
4008 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4009 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4010 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4011 BPF_LD_MAP_FD(BPF_REG_1, 0),
4012 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4013 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4014 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4015 BPF_FUNC_map_lookup_elem),
4016 BPF_MOV64_IMM(BPF_REG_2, 10),
4017 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4018 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4019 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4021 BPF_FUNC_map_lookup_elem),
4022 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4023 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4024 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4025 BPF_EXIT_INSN(),
4026 },
4027 .fixup_map1 = { 4 },
4028 .result = ACCEPT,
4029 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4030 },
Josef Bacike9548902016-11-29 12:35:19 -05004031 {
4032 "invalid map access from else condition",
4033 .insns = {
4034 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4035 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4037 BPF_LD_MAP_FD(BPF_REG_1, 0),
4038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4039 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4040 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4041 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4043 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4044 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4045 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4046 BPF_EXIT_INSN(),
4047 },
4048 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004049 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05004050 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01004051 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05004052 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02004053 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05004054 },
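	/*
	 * A known constant ORed with a constant (immediate or register)
	 * stays a known constant, so the stack access size passed to
	 * bpf_probe_read() is checked exactly: 34 | 13 = 47 fits into the
	 * 48 bytes below R10, while 34 | 24 = 58 does not.
	 */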
Gianluca Borello3c8397442016-12-03 12:31:33 -08004055 {
4056 "constant register |= constant should keep constant type",
4057 .insns = {
4058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4060 BPF_MOV64_IMM(BPF_REG_2, 34),
4061 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4062 BPF_MOV64_IMM(BPF_REG_3, 0),
4063 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4064 BPF_EXIT_INSN(),
4065 },
4066 .result = ACCEPT,
4067 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4068 },
4069 {
4070 "constant register |= constant should not bypass stack boundary checks",
4071 .insns = {
4072 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4074 BPF_MOV64_IMM(BPF_REG_2, 34),
4075 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4076 BPF_MOV64_IMM(BPF_REG_3, 0),
4077 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4078 BPF_EXIT_INSN(),
4079 },
4080 .errstr = "invalid stack type R1 off=-48 access_size=58",
4081 .result = REJECT,
4082 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4083 },
4084 {
4085 "constant register |= constant register should keep constant type",
4086 .insns = {
4087 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4089 BPF_MOV64_IMM(BPF_REG_2, 34),
4090 BPF_MOV64_IMM(BPF_REG_4, 13),
4091 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4092 BPF_MOV64_IMM(BPF_REG_3, 0),
4093 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4094 BPF_EXIT_INSN(),
4095 },
4096 .result = ACCEPT,
4097 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4098 },
4099 {
4100 "constant register |= constant register should not bypass stack boundary checks",
4101 .insns = {
4102 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4104 BPF_MOV64_IMM(BPF_REG_2, 34),
4105 BPF_MOV64_IMM(BPF_REG_4, 24),
4106 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4107 BPF_MOV64_IMM(BPF_REG_3, 0),
4108 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4109 BPF_EXIT_INSN(),
4110 },
4111 .errstr = "invalid stack type R1 off=-48 access_size=58",
4112 .result = REJECT,
4113 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4114 },
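	/*
	 * LWT program types: direct packet reads are allowed for LWT_IN,
	 * LWT_OUT and LWT_XMIT, but direct packet writes only for LWT_XMIT.
	 */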
Thomas Graf3f731d82016-12-05 10:30:52 +01004115 {
4116 "invalid direct packet write for LWT_IN",
4117 .insns = {
4118 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4119 offsetof(struct __sk_buff, data)),
4120 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4121 offsetof(struct __sk_buff, data_end)),
4122 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4124 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4125 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4126 BPF_MOV64_IMM(BPF_REG_0, 0),
4127 BPF_EXIT_INSN(),
4128 },
4129 .errstr = "cannot write into packet",
4130 .result = REJECT,
4131 .prog_type = BPF_PROG_TYPE_LWT_IN,
4132 },
4133 {
4134 "invalid direct packet write for LWT_OUT",
4135 .insns = {
4136 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4137 offsetof(struct __sk_buff, data)),
4138 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4139 offsetof(struct __sk_buff, data_end)),
4140 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4142 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4143 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4144 BPF_MOV64_IMM(BPF_REG_0, 0),
4145 BPF_EXIT_INSN(),
4146 },
4147 .errstr = "cannot write into packet",
4148 .result = REJECT,
4149 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4150 },
4151 {
4152 "direct packet write for LWT_XMIT",
4153 .insns = {
4154 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 offsetof(struct __sk_buff, data)),
4156 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4157 offsetof(struct __sk_buff, data_end)),
4158 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4160 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4161 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4162 BPF_MOV64_IMM(BPF_REG_0, 0),
4163 BPF_EXIT_INSN(),
4164 },
4165 .result = ACCEPT,
4166 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4167 },
4168 {
4169 "direct packet read for LWT_IN",
4170 .insns = {
4171 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4172 offsetof(struct __sk_buff, data)),
4173 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4174 offsetof(struct __sk_buff, data_end)),
4175 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4177 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4178 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4179 BPF_MOV64_IMM(BPF_REG_0, 0),
4180 BPF_EXIT_INSN(),
4181 },
4182 .result = ACCEPT,
4183 .prog_type = BPF_PROG_TYPE_LWT_IN,
4184 },
4185 {
4186 "direct packet read for LWT_OUT",
4187 .insns = {
4188 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4189 offsetof(struct __sk_buff, data)),
4190 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4191 offsetof(struct __sk_buff, data_end)),
4192 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4194 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4195 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4196 BPF_MOV64_IMM(BPF_REG_0, 0),
4197 BPF_EXIT_INSN(),
4198 },
4199 .result = ACCEPT,
4200 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4201 },
4202 {
4203 "direct packet read for LWT_XMIT",
4204 .insns = {
4205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4206 offsetof(struct __sk_buff, data)),
4207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4208 offsetof(struct __sk_buff, data_end)),
4209 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4211 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4212 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4213 BPF_MOV64_IMM(BPF_REG_0, 0),
4214 BPF_EXIT_INSN(),
4215 },
4216 .result = ACCEPT,
4217 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4218 },
4219 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004220 "overlapping checks for direct packet access",
4221 .insns = {
4222 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4223 offsetof(struct __sk_buff, data)),
4224 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4225 offsetof(struct __sk_buff, data_end)),
4226 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4228 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4229 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4231 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4232 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4233 BPF_MOV64_IMM(BPF_REG_0, 0),
4234 BPF_EXIT_INSN(),
4235 },
4236 .result = ACCEPT,
4237 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4238 },
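	/* __sk_buff->tc_classid is not accessible from LWT programs. */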
4239 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004240 "invalid access of tc_classid for LWT_IN",
4241 .insns = {
4242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4243 offsetof(struct __sk_buff, tc_classid)),
4244 BPF_EXIT_INSN(),
4245 },
4246 .result = REJECT,
4247 .errstr = "invalid bpf_context access",
4248 },
4249 {
4250 "invalid access of tc_classid for LWT_OUT",
4251 .insns = {
4252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4253 offsetof(struct __sk_buff, tc_classid)),
4254 BPF_EXIT_INSN(),
4255 },
4256 .result = REJECT,
4257 .errstr = "invalid bpf_context access",
4258 },
4259 {
4260 "invalid access of tc_classid for LWT_XMIT",
4261 .insns = {
4262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4263 offsetof(struct __sk_buff, tc_classid)),
4264 BPF_EXIT_INSN(),
4265 },
4266 .result = REJECT,
4267 .errstr = "invalid bpf_context access",
4268 },
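	/*
	 * Unprivileged programs may not leak kernel addresses: storing a
	 * map pointer, the frame pointer or the context pointer into the
	 * skb cb[] area or into a map value is rejected for unprivileged
	 * loads but accepted for privileged ones.
	 */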
Gianluca Borello57225692017-01-09 10:19:47 -08004269 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004270 "leak pointer into ctx 1",
4271 .insns = {
4272 BPF_MOV64_IMM(BPF_REG_0, 0),
4273 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4274 offsetof(struct __sk_buff, cb[0])),
4275 BPF_LD_MAP_FD(BPF_REG_2, 0),
4276 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4277 offsetof(struct __sk_buff, cb[0])),
4278 BPF_EXIT_INSN(),
4279 },
4280 .fixup_map1 = { 2 },
4281 .errstr_unpriv = "R2 leaks addr into mem",
4282 .result_unpriv = REJECT,
4283 .result = ACCEPT,
4284 },
4285 {
4286 "leak pointer into ctx 2",
4287 .insns = {
4288 BPF_MOV64_IMM(BPF_REG_0, 0),
4289 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4290 offsetof(struct __sk_buff, cb[0])),
4291 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4292 offsetof(struct __sk_buff, cb[0])),
4293 BPF_EXIT_INSN(),
4294 },
4295 .errstr_unpriv = "R10 leaks addr into mem",
4296 .result_unpriv = REJECT,
4297 .result = ACCEPT,
4298 },
4299 {
4300 "leak pointer into ctx 3",
4301 .insns = {
4302 BPF_MOV64_IMM(BPF_REG_0, 0),
4303 BPF_LD_MAP_FD(BPF_REG_2, 0),
4304 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4305 offsetof(struct __sk_buff, cb[0])),
4306 BPF_EXIT_INSN(),
4307 },
4308 .fixup_map1 = { 1 },
4309 .errstr_unpriv = "R2 leaks addr into ctx",
4310 .result_unpriv = REJECT,
4311 .result = ACCEPT,
4312 },
4313 {
4314 "leak pointer into map val",
4315 .insns = {
4316 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4317 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4318 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4320 BPF_LD_MAP_FD(BPF_REG_1, 0),
4321 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4322 BPF_FUNC_map_lookup_elem),
4323 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4324 BPF_MOV64_IMM(BPF_REG_3, 0),
4325 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4326 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4327 BPF_MOV64_IMM(BPF_REG_0, 0),
4328 BPF_EXIT_INSN(),
4329 },
4330 .fixup_map1 = { 4 },
4331 .errstr_unpriv = "R6 leaks addr into mem",
4332 .result_unpriv = REJECT,
4333 .result = ACCEPT,
4334 },
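	/*
	 * Helpers that take a memory region as (pointer, size), here
	 * bpf_probe_read() with the pointer inside a map value: the range
	 * must stay within the 48-byte value.  Full and partial ranges are
	 * accepted; zero, negative and out-of-bound sizes are rejected.
	 */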
4335 {
Gianluca Borello57225692017-01-09 10:19:47 -08004336 "helper access to map: full range",
4337 .insns = {
4338 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4340 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4341 BPF_LD_MAP_FD(BPF_REG_1, 0),
4342 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4343 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4344 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4345 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4346 BPF_MOV64_IMM(BPF_REG_3, 0),
4347 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4348 BPF_EXIT_INSN(),
4349 },
4350 .fixup_map2 = { 3 },
4351 .result = ACCEPT,
4352 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4353 },
4354 {
4355 "helper access to map: partial range",
4356 .insns = {
4357 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4359 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4360 BPF_LD_MAP_FD(BPF_REG_1, 0),
4361 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4362 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4363 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4364 BPF_MOV64_IMM(BPF_REG_2, 8),
4365 BPF_MOV64_IMM(BPF_REG_3, 0),
4366 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4367 BPF_EXIT_INSN(),
4368 },
4369 .fixup_map2 = { 3 },
4370 .result = ACCEPT,
4371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4372 },
4373 {
4374 "helper access to map: empty range",
4375 .insns = {
4376 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4378 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4379 BPF_LD_MAP_FD(BPF_REG_1, 0),
4380 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004381 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4383 BPF_MOV64_IMM(BPF_REG_2, 0),
4384 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004385 BPF_EXIT_INSN(),
4386 },
4387 .fixup_map2 = { 3 },
4388 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4389 .result = REJECT,
4390 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4391 },
4392 {
4393 "helper access to map: out-of-bound range",
4394 .insns = {
4395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4397 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4398 BPF_LD_MAP_FD(BPF_REG_1, 0),
4399 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4400 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4401 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4402 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4403 BPF_MOV64_IMM(BPF_REG_3, 0),
4404 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4405 BPF_EXIT_INSN(),
4406 },
4407 .fixup_map2 = { 3 },
4408 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4409 .result = REJECT,
4410 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4411 },
4412 {
4413 "helper access to map: negative range",
4414 .insns = {
4415 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4417 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4418 BPF_LD_MAP_FD(BPF_REG_1, 0),
4419 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4420 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4422 BPF_MOV64_IMM(BPF_REG_2, -8),
4423 BPF_MOV64_IMM(BPF_REG_3, 0),
4424 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4425 BPF_EXIT_INSN(),
4426 },
4427 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004428 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004429 .result = REJECT,
4430 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4431 },
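	/*
	 * Same (pointer, size) checks, but with the map value pointer first
	 * advanced by a constant immediate offset
	 * (offsetof(struct test_val, foo), i.e. off=4 in the error strings),
	 * so the remaining space the helper may touch shrinks accordingly.
	 */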
4432 {
4433 "helper access to adjusted map (via const imm): full range",
4434 .insns = {
4435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4437 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4438 BPF_LD_MAP_FD(BPF_REG_1, 0),
4439 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4443 offsetof(struct test_val, foo)),
4444 BPF_MOV64_IMM(BPF_REG_2,
4445 sizeof(struct test_val) -
4446 offsetof(struct test_val, foo)),
4447 BPF_MOV64_IMM(BPF_REG_3, 0),
4448 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4449 BPF_EXIT_INSN(),
4450 },
4451 .fixup_map2 = { 3 },
4452 .result = ACCEPT,
4453 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4454 },
4455 {
4456 "helper access to adjusted map (via const imm): partial range",
4457 .insns = {
4458 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4460 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4461 BPF_LD_MAP_FD(BPF_REG_1, 0),
4462 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4463 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4464 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4466 offsetof(struct test_val, foo)),
4467 BPF_MOV64_IMM(BPF_REG_2, 8),
4468 BPF_MOV64_IMM(BPF_REG_3, 0),
4469 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4470 BPF_EXIT_INSN(),
4471 },
4472 .fixup_map2 = { 3 },
4473 .result = ACCEPT,
4474 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4475 },
4476 {
4477 "helper access to adjusted map (via const imm): empty range",
4478 .insns = {
4479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4481 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4482 BPF_LD_MAP_FD(BPF_REG_1, 0),
4483 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
Gianluca Borello57225692017-01-09 10:19:47 -08004485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4487 offsetof(struct test_val, foo)),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004488 BPF_MOV64_IMM(BPF_REG_2, 0),
4489 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004490 BPF_EXIT_INSN(),
4491 },
4492 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004493 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08004494 .result = REJECT,
4495 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4496 },
4497 {
4498 "helper access to adjusted map (via const imm): out-of-bound range",
4499 .insns = {
4500 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4502 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4503 BPF_LD_MAP_FD(BPF_REG_1, 0),
4504 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4505 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4506 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4508 offsetof(struct test_val, foo)),
4509 BPF_MOV64_IMM(BPF_REG_2,
4510 sizeof(struct test_val) -
4511 offsetof(struct test_val, foo) + 8),
4512 BPF_MOV64_IMM(BPF_REG_3, 0),
4513 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4514 BPF_EXIT_INSN(),
4515 },
4516 .fixup_map2 = { 3 },
4517 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4518 .result = REJECT,
4519 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4520 },
4521 {
4522 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4523 .insns = {
4524 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4525 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4526 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4527 BPF_LD_MAP_FD(BPF_REG_1, 0),
4528 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4529 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4532 offsetof(struct test_val, foo)),
4533 BPF_MOV64_IMM(BPF_REG_2, -8),
4534 BPF_MOV64_IMM(BPF_REG_3, 0),
4535 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4536 BPF_EXIT_INSN(),
4537 },
4538 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004539 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004540 .result = REJECT,
4541 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4542 },
4543 {
4544 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4545 .insns = {
4546 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4548 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4549 BPF_LD_MAP_FD(BPF_REG_1, 0),
4550 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4551 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4552 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4553 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4554 offsetof(struct test_val, foo)),
4555 BPF_MOV64_IMM(BPF_REG_2, -1),
4556 BPF_MOV64_IMM(BPF_REG_3, 0),
4557 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4558 BPF_EXIT_INSN(),
4559 },
4560 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004561 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004562 .result = REJECT,
4563 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4564 },
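/*
 * Descriptive note (added): "via const reg" uses the same layout as the
 * const-imm tests, but the constant offset is first loaded into R3 and added
 * with BPF_ALU64_REG, so the known-constant value has to be tracked through
 * register state rather than an immediate operand. The accept/reject
 * outcomes mirror the group above.
 */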
4565 {
4566 "helper access to adjusted map (via const reg): full range",
4567 .insns = {
4568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4571 BPF_LD_MAP_FD(BPF_REG_1, 0),
4572 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4574 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4575 BPF_MOV64_IMM(BPF_REG_3,
4576 offsetof(struct test_val, foo)),
4577 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4578 BPF_MOV64_IMM(BPF_REG_2,
4579 sizeof(struct test_val) -
4580 offsetof(struct test_val, foo)),
4581 BPF_MOV64_IMM(BPF_REG_3, 0),
4582 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4583 BPF_EXIT_INSN(),
4584 },
4585 .fixup_map2 = { 3 },
4586 .result = ACCEPT,
4587 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4588 },
4589 {
4590 "helper access to adjusted map (via const reg): partial range",
4591 .insns = {
4592 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4593 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4594 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4595 BPF_LD_MAP_FD(BPF_REG_1, 0),
4596 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4597 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4598 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4599 BPF_MOV64_IMM(BPF_REG_3,
4600 offsetof(struct test_val, foo)),
4601 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4602 BPF_MOV64_IMM(BPF_REG_2, 8),
4603 BPF_MOV64_IMM(BPF_REG_3, 0),
4604 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4605 BPF_EXIT_INSN(),
4606 },
4607 .fixup_map2 = { 3 },
4608 .result = ACCEPT,
4609 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4610 },
4611 {
4612 "helper access to adjusted map (via const reg): empty range",
4613 .insns = {
4614 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4616 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4617 BPF_LD_MAP_FD(BPF_REG_1, 0),
4618 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
Gianluca Borello57225692017-01-09 10:19:47 -08004620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4621 BPF_MOV64_IMM(BPF_REG_3, 0),
4622 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004623 BPF_MOV64_IMM(BPF_REG_2, 0),
4624 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004625 BPF_EXIT_INSN(),
4626 },
4627 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004628 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08004629 .result = REJECT,
4630 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4631 },
4632 {
4633 "helper access to adjusted map (via const reg): out-of-bound range",
4634 .insns = {
4635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4637 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4638 BPF_LD_MAP_FD(BPF_REG_1, 0),
4639 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4640 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4642 BPF_MOV64_IMM(BPF_REG_3,
4643 offsetof(struct test_val, foo)),
4644 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4645 BPF_MOV64_IMM(BPF_REG_2,
4646 sizeof(struct test_val) -
4647 offsetof(struct test_val, foo) + 8),
4648 BPF_MOV64_IMM(BPF_REG_3, 0),
4649 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4650 BPF_EXIT_INSN(),
4651 },
4652 .fixup_map2 = { 3 },
4653 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4654 .result = REJECT,
4655 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4656 },
4657 {
4658 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4659 .insns = {
4660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4662 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4663 BPF_LD_MAP_FD(BPF_REG_1, 0),
4664 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4667 BPF_MOV64_IMM(BPF_REG_3,
4668 offsetof(struct test_val, foo)),
4669 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4670 BPF_MOV64_IMM(BPF_REG_2, -8),
4671 BPF_MOV64_IMM(BPF_REG_3, 0),
4672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4673 BPF_EXIT_INSN(),
4674 },
4675 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004676 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004677 .result = REJECT,
4678 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4679 },
4680 {
4681 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4682 .insns = {
4683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4685 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4686 BPF_LD_MAP_FD(BPF_REG_1, 0),
4687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4690 BPF_MOV64_IMM(BPF_REG_3,
4691 offsetof(struct test_val, foo)),
4692 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4693 BPF_MOV64_IMM(BPF_REG_2, -1),
4694 BPF_MOV64_IMM(BPF_REG_3, 0),
4695 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4696 BPF_EXIT_INSN(),
4697 },
4698 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004699 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004700 .result = REJECT,
4701 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4702 },
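/*
 * Descriptive note (added): "via variable" loads the offset added to the map
 * value pointer from the map element itself, so it is only known through the
 * bounds established by the preceding conditional jump. With a correct JGT
 * upper bound the accesses are accepted; with a missing or too-large bound
 * the program is rejected with the errstr shown in each test.
 */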
4703 {
4704 "helper access to adjusted map (via variable): full range",
4705 .insns = {
4706 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4708 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4709 BPF_LD_MAP_FD(BPF_REG_1, 0),
4710 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4714 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4715 offsetof(struct test_val, foo), 4),
4716 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4717 BPF_MOV64_IMM(BPF_REG_2,
4718 sizeof(struct test_val) -
4719 offsetof(struct test_val, foo)),
4720 BPF_MOV64_IMM(BPF_REG_3, 0),
4721 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4722 BPF_EXIT_INSN(),
4723 },
4724 .fixup_map2 = { 3 },
4725 .result = ACCEPT,
4726 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4727 },
4728 {
4729 "helper access to adjusted map (via variable): partial range",
4730 .insns = {
4731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4733 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4734 BPF_LD_MAP_FD(BPF_REG_1, 0),
4735 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4738 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4739 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4740 offsetof(struct test_val, foo), 4),
4741 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4742 BPF_MOV64_IMM(BPF_REG_2, 8),
4743 BPF_MOV64_IMM(BPF_REG_3, 0),
4744 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4745 BPF_EXIT_INSN(),
4746 },
4747 .fixup_map2 = { 3 },
4748 .result = ACCEPT,
4749 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4750 },
4751 {
4752 "helper access to adjusted map (via variable): empty range",
4753 .insns = {
4754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4756 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4757 BPF_LD_MAP_FD(BPF_REG_1, 0),
4758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
Gianluca Borello57225692017-01-09 10:19:47 -08004760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4761 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4762 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004763 offsetof(struct test_val, foo), 3),
Gianluca Borello57225692017-01-09 10:19:47 -08004764 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004765 BPF_MOV64_IMM(BPF_REG_2, 0),
4766 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
Gianluca Borello57225692017-01-09 10:19:47 -08004767 BPF_EXIT_INSN(),
4768 },
4769 .fixup_map2 = { 3 },
Yonghong Songf1a8b8e2017-11-21 11:23:40 -08004770 .errstr = "R1 min value is outside of the array range",
Gianluca Borello57225692017-01-09 10:19:47 -08004771 .result = REJECT,
4772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4773 },
4774 {
4775 "helper access to adjusted map (via variable): no max check",
4776 .insns = {
4777 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4779 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4780 BPF_LD_MAP_FD(BPF_REG_1, 0),
4781 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4783 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4784 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4785 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
Edward Creef65b1842017-08-07 15:27:12 +01004786 BPF_MOV64_IMM(BPF_REG_2, 1),
Gianluca Borello57225692017-01-09 10:19:47 -08004787 BPF_MOV64_IMM(BPF_REG_3, 0),
4788 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4789 BPF_EXIT_INSN(),
4790 },
4791 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004792 .errstr = "R1 unbounded memory access",
Gianluca Borello57225692017-01-09 10:19:47 -08004793 .result = REJECT,
4794 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4795 },
4796 {
4797 "helper access to adjusted map (via variable): wrong max check",
4798 .insns = {
4799 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4801 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4802 BPF_LD_MAP_FD(BPF_REG_1, 0),
4803 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4806 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4807 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4808 offsetof(struct test_val, foo), 4),
4809 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4810 BPF_MOV64_IMM(BPF_REG_2,
4811 sizeof(struct test_val) -
4812 offsetof(struct test_val, foo) + 1),
4813 BPF_MOV64_IMM(BPF_REG_3, 0),
4814 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4815 BPF_EXIT_INSN(),
4816 },
4817 .fixup_map2 = { 3 },
4818 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4819 .result = REJECT,
4820 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4821 },
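/*
 * Descriptive note (added): bounds checks written with the "lower than" jump
 * family, JLT, JLE and their signed forms JSLT, JSLE. In the "good access"
 * variants only values proven to fit the 48-byte value can reach the store;
 * the "bad access" variants either perform the store on the unbounded
 * fall-through path or load the full 64-bit value so that negative offsets
 * are not ruled out, and are expected to be rejected.
 */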
Gianluca Borellof0318d02017-01-09 10:19:48 -08004822 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02004823 "helper access to map: bounds check using <, good access",
4824 .insns = {
4825 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4827 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4828 BPF_LD_MAP_FD(BPF_REG_1, 0),
4829 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4830 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4832 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4833 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4834 BPF_MOV64_IMM(BPF_REG_0, 0),
4835 BPF_EXIT_INSN(),
4836 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4837 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4838 BPF_MOV64_IMM(BPF_REG_0, 0),
4839 BPF_EXIT_INSN(),
4840 },
4841 .fixup_map2 = { 3 },
4842 .result = ACCEPT,
4843 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4844 },
4845 {
4846 "helper access to map: bounds check using <, bad access",
4847 .insns = {
4848 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4850 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4851 BPF_LD_MAP_FD(BPF_REG_1, 0),
4852 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4853 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4854 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4855 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4856 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4857 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4858 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4859 BPF_MOV64_IMM(BPF_REG_0, 0),
4860 BPF_EXIT_INSN(),
4861 BPF_MOV64_IMM(BPF_REG_0, 0),
4862 BPF_EXIT_INSN(),
4863 },
4864 .fixup_map2 = { 3 },
4865 .result = REJECT,
4866 .errstr = "R1 unbounded memory access",
4867 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4868 },
4869 {
4870 "helper access to map: bounds check using <=, good access",
4871 .insns = {
4872 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4874 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4875 BPF_LD_MAP_FD(BPF_REG_1, 0),
4876 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4877 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4878 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4879 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4880 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4881 BPF_MOV64_IMM(BPF_REG_0, 0),
4882 BPF_EXIT_INSN(),
4883 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4884 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4885 BPF_MOV64_IMM(BPF_REG_0, 0),
4886 BPF_EXIT_INSN(),
4887 },
4888 .fixup_map2 = { 3 },
4889 .result = ACCEPT,
4890 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4891 },
4892 {
4893 "helper access to map: bounds check using <=, bad access",
4894 .insns = {
4895 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4897 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4898 BPF_LD_MAP_FD(BPF_REG_1, 0),
4899 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4900 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4901 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4902 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4903 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4904 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4905 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4906 BPF_MOV64_IMM(BPF_REG_0, 0),
4907 BPF_EXIT_INSN(),
4908 BPF_MOV64_IMM(BPF_REG_0, 0),
4909 BPF_EXIT_INSN(),
4910 },
4911 .fixup_map2 = { 3 },
4912 .result = REJECT,
4913 .errstr = "R1 unbounded memory access",
4914 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4915 },
4916 {
4917 "helper access to map: bounds check using s<, good access",
4918 .insns = {
4919 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4920 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4921 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4922 BPF_LD_MAP_FD(BPF_REG_1, 0),
4923 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4925 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4926 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4927 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4928 BPF_MOV64_IMM(BPF_REG_0, 0),
4929 BPF_EXIT_INSN(),
4930 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4931 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4932 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4933 BPF_MOV64_IMM(BPF_REG_0, 0),
4934 BPF_EXIT_INSN(),
4935 },
4936 .fixup_map2 = { 3 },
4937 .result = ACCEPT,
4938 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4939 },
4940 {
4941 "helper access to map: bounds check using s<, good access 2",
4942 .insns = {
4943 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4945 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4946 BPF_LD_MAP_FD(BPF_REG_1, 0),
4947 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4948 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4949 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4950 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4951 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4952 BPF_MOV64_IMM(BPF_REG_0, 0),
4953 BPF_EXIT_INSN(),
4954 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4955 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4956 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4957 BPF_MOV64_IMM(BPF_REG_0, 0),
4958 BPF_EXIT_INSN(),
4959 },
4960 .fixup_map2 = { 3 },
4961 .result = ACCEPT,
4962 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4963 },
4964 {
4965 "helper access to map: bounds check using s<, bad access",
4966 .insns = {
4967 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4969 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4970 BPF_LD_MAP_FD(BPF_REG_1, 0),
4971 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4972 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4974 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4975 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4976 BPF_MOV64_IMM(BPF_REG_0, 0),
4977 BPF_EXIT_INSN(),
4978 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4979 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4980 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4981 BPF_MOV64_IMM(BPF_REG_0, 0),
4982 BPF_EXIT_INSN(),
4983 },
4984 .fixup_map2 = { 3 },
4985 .result = REJECT,
4986 .errstr = "R1 min value is negative",
4987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4988 },
4989 {
4990 "helper access to map: bounds check using s<=, good access",
4991 .insns = {
4992 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4994 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4995 BPF_LD_MAP_FD(BPF_REG_1, 0),
4996 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4997 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4998 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4999 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5000 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5001 BPF_MOV64_IMM(BPF_REG_0, 0),
5002 BPF_EXIT_INSN(),
5003 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5004 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5005 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5006 BPF_MOV64_IMM(BPF_REG_0, 0),
5007 BPF_EXIT_INSN(),
5008 },
5009 .fixup_map2 = { 3 },
5010 .result = ACCEPT,
5011 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5012 },
5013 {
5014 "helper access to map: bounds check using s<=, good access 2",
5015 .insns = {
5016 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5018 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5019 BPF_LD_MAP_FD(BPF_REG_1, 0),
5020 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5021 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5022 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5023 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5024 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5025 BPF_MOV64_IMM(BPF_REG_0, 0),
5026 BPF_EXIT_INSN(),
5027 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5028 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5029 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5030 BPF_MOV64_IMM(BPF_REG_0, 0),
5031 BPF_EXIT_INSN(),
5032 },
5033 .fixup_map2 = { 3 },
5034 .result = ACCEPT,
5035 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5036 },
5037 {
5038 "helper access to map: bounds check using s<=, bad access",
5039 .insns = {
5040 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5042 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5043 BPF_LD_MAP_FD(BPF_REG_1, 0),
5044 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5045 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5047 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5048 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5049 BPF_MOV64_IMM(BPF_REG_0, 0),
5050 BPF_EXIT_INSN(),
5051 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5052 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5053 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5054 BPF_MOV64_IMM(BPF_REG_0, 0),
5055 BPF_EXIT_INSN(),
5056 },
5057 .fixup_map2 = { 3 },
5058 .result = REJECT,
5059 .errstr = "R1 min value is negative",
5060 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5061 },
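/*
 * Descriptive note (added): spilling and direct stores of map value pointers.
 * A map value (or map_value_or_null) pointer spilled to the stack must keep
 * its type when reloaded, a store through a register clobbered by the helper
 * call must fail with "R1 !read_ok", and the unaligned store/load variants
 * carry F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, so they are only exercised where
 * unaligned accesses are considered safe.
 */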
5062 {
Gianluca Borellof0318d02017-01-09 10:19:48 -08005063 "map element value is preserved across register spilling",
5064 .insns = {
5065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5067 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5068 BPF_LD_MAP_FD(BPF_REG_1, 0),
5069 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5071 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5072 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5073 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5074 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5075 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5076 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5077 BPF_EXIT_INSN(),
5078 },
5079 .fixup_map2 = { 3 },
5080 .errstr_unpriv = "R0 leaks addr",
5081 .result = ACCEPT,
5082 .result_unpriv = REJECT,
5083 },
5084 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005085 "map element value or null is marked on register spilling",
5086 .insns = {
5087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5089 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5090 BPF_LD_MAP_FD(BPF_REG_1, 0),
5091 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5092 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5093 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5094 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5095 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5096 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5097 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5098 BPF_EXIT_INSN(),
5099 },
5100 .fixup_map2 = { 3 },
5101 .errstr_unpriv = "R0 leaks addr",
5102 .result = ACCEPT,
5103 .result_unpriv = REJECT,
5104 },
5105 {
5106 "map element value store of cleared call register",
5107 .insns = {
5108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5110 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5111 BPF_LD_MAP_FD(BPF_REG_1, 0),
5112 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5114 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5115 BPF_EXIT_INSN(),
5116 },
5117 .fixup_map2 = { 3 },
5118 .errstr_unpriv = "R1 !read_ok",
5119 .errstr = "R1 !read_ok",
5120 .result = REJECT,
5121 .result_unpriv = REJECT,
5122 },
5123 {
5124 "map element value with unaligned store",
5125 .insns = {
5126 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5127 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5128 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5129 BPF_LD_MAP_FD(BPF_REG_1, 0),
5130 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5131 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5133 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5134 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5135 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5136 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5137 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5138 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5139 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5141 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5142 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5143 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5144 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5146 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5147 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5148 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5149 BPF_EXIT_INSN(),
5150 },
5151 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005152 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005153 .result = ACCEPT,
5154 .result_unpriv = REJECT,
5155 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5156 },
5157 {
5158 "map element value with unaligned load",
5159 .insns = {
5160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5162 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5163 BPF_LD_MAP_FD(BPF_REG_1, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5166 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5167 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5168 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5169 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5170 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5171 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5172 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5173 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5175 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5176 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5177 BPF_EXIT_INSN(),
5178 },
5179 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005180 .errstr_unpriv = "R0 leaks addr",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005181 .result = ACCEPT,
5182 .result_unpriv = REJECT,
5183 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5184 },
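/*
 * Descriptive note (added): "illegal alu op" 1-5. A bitwise AND, a 32-bit
 * add, a division, a byte swap and an XADD through a spilled slot each turn
 * the map value pointer into an unknown scalar, so the store that follows is
 * expected to fail with "invalid mem access 'inv'" (plus operator-specific
 * messages for the unprivileged case).
 */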
5185 {
5186 "map element value illegal alu op, 1",
5187 .insns = {
5188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5190 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5191 BPF_LD_MAP_FD(BPF_REG_1, 0),
5192 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5194 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5196 BPF_EXIT_INSN(),
5197 },
5198 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005199 .errstr_unpriv = "R0 bitwise operator &= on pointer",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005200 .errstr = "invalid mem access 'inv'",
5201 .result = REJECT,
5202 .result_unpriv = REJECT,
5203 },
5204 {
5205 "map element value illegal alu op, 2",
5206 .insns = {
5207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5210 BPF_LD_MAP_FD(BPF_REG_1, 0),
5211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5213 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5214 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5215 BPF_EXIT_INSN(),
5216 },
5217 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005218 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005219 .errstr = "invalid mem access 'inv'",
5220 .result = REJECT,
5221 .result_unpriv = REJECT,
5222 },
5223 {
5224 "map element value illegal alu op, 3",
5225 .insns = {
5226 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5228 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5229 BPF_LD_MAP_FD(BPF_REG_1, 0),
5230 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5231 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5232 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5233 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5234 BPF_EXIT_INSN(),
5235 },
5236 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005237 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005238 .errstr = "invalid mem access 'inv'",
5239 .result = REJECT,
5240 .result_unpriv = REJECT,
5241 },
5242 {
5243 "map element value illegal alu op, 4",
5244 .insns = {
5245 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5247 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5248 BPF_LD_MAP_FD(BPF_REG_1, 0),
5249 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5250 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5251 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5252 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5253 BPF_EXIT_INSN(),
5254 },
5255 .fixup_map2 = { 3 },
5256 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5257 .errstr = "invalid mem access 'inv'",
5258 .result = REJECT,
5259 .result_unpriv = REJECT,
5260 },
5261 {
5262 "map element value illegal alu op, 5",
5263 .insns = {
5264 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5266 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5267 BPF_LD_MAP_FD(BPF_REG_1, 0),
5268 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5270 BPF_MOV64_IMM(BPF_REG_3, 4096),
5271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5273 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5274 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5275 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5276 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5277 BPF_EXIT_INSN(),
5278 },
5279 .fixup_map2 = { 3 },
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005280 .errstr = "R0 invalid mem access 'inv'",
5281 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005282 },
5283 {
5284 "map element value is preserved across register spilling",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005285 .insns = {
5286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5288 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5289 BPF_LD_MAP_FD(BPF_REG_1, 0),
5290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5292 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5293 offsetof(struct test_val, foo)),
5294 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5296 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5297 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5298 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5299 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5300 BPF_EXIT_INSN(),
5301 },
5302 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01005303 .errstr_unpriv = "R0 leaks addr",
Gianluca Borellof0318d02017-01-09 10:19:48 -08005304 .result = ACCEPT,
5305 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02005306 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Gianluca Borellof0318d02017-01-09 10:19:48 -08005307 },
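/*
 * Descriptive note (added): "helper access to variable memory" against the
 * stack. The length in R2 is spilled and reloaded, so it is treated as
 * unknown and must be bounded before the call. The correct-bounds variants
 * initialize 64 bytes of stack and clamp R2 (via bitwise AND or signed/
 * unsigned jumps) before calling bpf_probe_read() and are accepted; the
 * remaining variants leave the size unbounded, too large, or readable from
 * uninitialized stack and are rejected.
 */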
Gianluca Borello06c1c042017-01-09 10:19:49 -08005308 {
5309 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5310 .insns = {
5311 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5313 BPF_MOV64_IMM(BPF_REG_0, 0),
5314 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5315 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5316 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5317 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5318 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5319 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5320 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5321 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5322 BPF_MOV64_IMM(BPF_REG_2, 16),
5323 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5324 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5325 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5326 BPF_MOV64_IMM(BPF_REG_4, 0),
5327 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5328 BPF_MOV64_IMM(BPF_REG_3, 0),
5329 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5330 BPF_MOV64_IMM(BPF_REG_0, 0),
5331 BPF_EXIT_INSN(),
5332 },
5333 .result = ACCEPT,
5334 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5335 },
5336 {
5337 "helper access to variable memory: stack, bitwise AND, zero included",
5338 .insns = {
5339 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5341 BPF_MOV64_IMM(BPF_REG_2, 16),
5342 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5343 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5344 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5345 BPF_MOV64_IMM(BPF_REG_3, 0),
5346 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5347 BPF_EXIT_INSN(),
5348 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005349 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005350 .result = REJECT,
5351 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5352 },
5353 {
5354 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5355 .insns = {
5356 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5358 BPF_MOV64_IMM(BPF_REG_2, 16),
5359 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5360 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5361 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5362 BPF_MOV64_IMM(BPF_REG_4, 0),
5363 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5364 BPF_MOV64_IMM(BPF_REG_3, 0),
5365 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5366 BPF_MOV64_IMM(BPF_REG_0, 0),
5367 BPF_EXIT_INSN(),
5368 },
5369 .errstr = "invalid stack type R1 off=-64 access_size=65",
5370 .result = REJECT,
5371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5372 },
5373 {
5374 "helper access to variable memory: stack, JMP, correct bounds",
5375 .insns = {
5376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5378 BPF_MOV64_IMM(BPF_REG_0, 0),
5379 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5380 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5381 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5382 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5383 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5384 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5385 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5386 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5387 BPF_MOV64_IMM(BPF_REG_2, 16),
5388 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5389 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5390 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5391 BPF_MOV64_IMM(BPF_REG_4, 0),
5392 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5393 BPF_MOV64_IMM(BPF_REG_3, 0),
5394 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5396 BPF_EXIT_INSN(),
5397 },
5398 .result = ACCEPT,
5399 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5400 },
5401 {
5402 "helper access to variable memory: stack, JMP (signed), correct bounds",
5403 .insns = {
5404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5406 BPF_MOV64_IMM(BPF_REG_0, 0),
5407 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5408 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5409 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5410 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5411 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5412 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5415 BPF_MOV64_IMM(BPF_REG_2, 16),
5416 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5417 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5418 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5419 BPF_MOV64_IMM(BPF_REG_4, 0),
5420 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5421 BPF_MOV64_IMM(BPF_REG_3, 0),
5422 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5423 BPF_MOV64_IMM(BPF_REG_0, 0),
5424 BPF_EXIT_INSN(),
5425 },
5426 .result = ACCEPT,
5427 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5428 },
5429 {
5430 "helper access to variable memory: stack, JMP, bounds + offset",
5431 .insns = {
5432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5434 BPF_MOV64_IMM(BPF_REG_2, 16),
5435 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5436 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5437 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5438 BPF_MOV64_IMM(BPF_REG_4, 0),
5439 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5441 BPF_MOV64_IMM(BPF_REG_3, 0),
5442 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5443 BPF_MOV64_IMM(BPF_REG_0, 0),
5444 BPF_EXIT_INSN(),
5445 },
5446 .errstr = "invalid stack type R1 off=-64 access_size=65",
5447 .result = REJECT,
5448 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5449 },
5450 {
5451 "helper access to variable memory: stack, JMP, wrong max",
5452 .insns = {
5453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5454 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5455 BPF_MOV64_IMM(BPF_REG_2, 16),
5456 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5457 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5458 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5459 BPF_MOV64_IMM(BPF_REG_4, 0),
5460 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5461 BPF_MOV64_IMM(BPF_REG_3, 0),
5462 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5463 BPF_MOV64_IMM(BPF_REG_0, 0),
5464 BPF_EXIT_INSN(),
5465 },
5466 .errstr = "invalid stack type R1 off=-64 access_size=65",
5467 .result = REJECT,
5468 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5469 },
5470 {
5471 "helper access to variable memory: stack, JMP, no max check",
5472 .insns = {
5473 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5475 BPF_MOV64_IMM(BPF_REG_2, 16),
5476 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5477 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5478 BPF_MOV64_IMM(BPF_REG_4, 0),
5479 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5480 BPF_MOV64_IMM(BPF_REG_3, 0),
5481 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5482 BPF_MOV64_IMM(BPF_REG_0, 0),
5483 BPF_EXIT_INSN(),
5484 },
Edward Creef65b1842017-08-07 15:27:12 +01005485 /* because max wasn't checked, signed min is negative */
5486 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005487 .result = REJECT,
5488 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5489 },
5490 {
5491 "helper access to variable memory: stack, JMP, no min check",
5492 .insns = {
5493 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5494 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5495 BPF_MOV64_IMM(BPF_REG_2, 16),
5496 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5497 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5498 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5499 BPF_MOV64_IMM(BPF_REG_3, 0),
5500 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5501 BPF_MOV64_IMM(BPF_REG_0, 0),
5502 BPF_EXIT_INSN(),
5503 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005504 .errstr = "invalid indirect read from stack off -64+0 size 64",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005505 .result = REJECT,
5506 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5507 },
5508 {
5509 "helper access to variable memory: stack, JMP (signed), no min check",
5510 .insns = {
5511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5513 BPF_MOV64_IMM(BPF_REG_2, 16),
5514 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5515 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5516 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5517 BPF_MOV64_IMM(BPF_REG_3, 0),
5518 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5519 BPF_MOV64_IMM(BPF_REG_0, 0),
5520 BPF_EXIT_INSN(),
5521 },
5522 .errstr = "R2 min value is negative",
5523 .result = REJECT,
5524 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5525 },
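/*
 * Descriptive note (added): variable length against a map value instead of
 * the stack. R2 is bounded with signed jumps against sizeof(struct test_val);
 * an upper bound that is off by one, or one that does not account for the
 * 20-byte adjustment of the value pointer, is expected to be rejected with
 * the size / array-range errors below.
 */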
5526 {
5527 "helper access to variable memory: map, JMP, correct bounds",
5528 .insns = {
5529 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5531 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5532 BPF_LD_MAP_FD(BPF_REG_1, 0),
5533 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5534 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5535 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5536 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5537 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5538 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5539 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5540 sizeof(struct test_val), 4),
5541 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005542 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005543 BPF_MOV64_IMM(BPF_REG_3, 0),
5544 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5545 BPF_MOV64_IMM(BPF_REG_0, 0),
5546 BPF_EXIT_INSN(),
5547 },
5548 .fixup_map2 = { 3 },
5549 .result = ACCEPT,
5550 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5551 },
5552 {
5553 "helper access to variable memory: map, JMP, wrong max",
5554 .insns = {
5555 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5557 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5558 BPF_LD_MAP_FD(BPF_REG_1, 0),
5559 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5562 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5563 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5564 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5565 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5566 sizeof(struct test_val) + 1, 4),
5567 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005568 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005569 BPF_MOV64_IMM(BPF_REG_3, 0),
5570 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5571 BPF_MOV64_IMM(BPF_REG_0, 0),
5572 BPF_EXIT_INSN(),
5573 },
5574 .fixup_map2 = { 3 },
5575 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5576 .result = REJECT,
5577 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5578 },
5579 {
5580 "helper access to variable memory: map adjusted, JMP, correct bounds",
5581 .insns = {
5582 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5583 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5584 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5585 BPF_LD_MAP_FD(BPF_REG_1, 0),
5586 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5588 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5590 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5591 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5592 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5593 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5594 sizeof(struct test_val) - 20, 4),
5595 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005596 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005597 BPF_MOV64_IMM(BPF_REG_3, 0),
5598 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5599 BPF_MOV64_IMM(BPF_REG_0, 0),
5600 BPF_EXIT_INSN(),
5601 },
5602 .fixup_map2 = { 3 },
5603 .result = ACCEPT,
5604 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5605 },
5606 {
5607 "helper access to variable memory: map adjusted, JMP, wrong max",
5608 .insns = {
5609 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5611 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5612 BPF_LD_MAP_FD(BPF_REG_1, 0),
5613 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5617 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5618 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5619 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5620 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5621 sizeof(struct test_val) - 19, 4),
5622 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmanna1502132017-07-21 00:00:23 +02005623 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005624 BPF_MOV64_IMM(BPF_REG_3, 0),
5625 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5626 BPF_MOV64_IMM(BPF_REG_0, 0),
5627 BPF_EXIT_INSN(),
5628 },
5629 .fixup_map2 = { 3 },
5630 .errstr = "R1 min value is outside of the array range",
5631 .result = REJECT,
5632 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5633 },
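/*
 * Descriptive note (added): size/NULL combinations for an
 * ARG_PTR_TO_MEM_OR_NULL argument, exercised through bpf_csum_diff(). A NULL
 * pointer is only accepted together with size 0, while non-NULL stack, map
 * and packet pointers may be paired with a size that is possibly (or exactly)
 * zero; NULL with size > 0 fails with "R1 type=inv expected=fp".
 */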
5634 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005635 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Edward Creef65b1842017-08-07 15:27:12 +01005636 .insns = {
5637 BPF_MOV64_IMM(BPF_REG_1, 0),
5638 BPF_MOV64_IMM(BPF_REG_2, 0),
5639 BPF_MOV64_IMM(BPF_REG_3, 0),
5640 BPF_MOV64_IMM(BPF_REG_4, 0),
5641 BPF_MOV64_IMM(BPF_REG_5, 0),
5642 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5643 BPF_EXIT_INSN(),
5644 },
5645 .result = ACCEPT,
5646 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5647 },
5648 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005649 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005650 .insns = {
5651 BPF_MOV64_IMM(BPF_REG_1, 0),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08005652 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01005653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5654 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005655 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5656 BPF_MOV64_IMM(BPF_REG_3, 0),
5657 BPF_MOV64_IMM(BPF_REG_4, 0),
5658 BPF_MOV64_IMM(BPF_REG_5, 0),
5659 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5660 BPF_EXIT_INSN(),
5661 },
Edward Creef65b1842017-08-07 15:27:12 +01005662 .errstr = "R1 type=inv expected=fp",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005663 .result = REJECT,
5664 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5665 },
5666 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005667 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Gianluca Borello06c1c042017-01-09 10:19:49 -08005668 .insns = {
5669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5671 BPF_MOV64_IMM(BPF_REG_2, 0),
5672 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5673 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5674 BPF_MOV64_IMM(BPF_REG_3, 0),
5675 BPF_MOV64_IMM(BPF_REG_4, 0),
5676 BPF_MOV64_IMM(BPF_REG_5, 0),
5677 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5678 BPF_EXIT_INSN(),
5679 },
Yonghong Songb6ff6392017-11-12 14:49:11 -08005680 .result = ACCEPT,
5681 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5682 },
5683 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005684 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08005685 .insns = {
5686 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5687 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5689 BPF_LD_MAP_FD(BPF_REG_1, 0),
5690 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5691 BPF_FUNC_map_lookup_elem),
5692 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5694 BPF_MOV64_IMM(BPF_REG_2, 0),
5695 BPF_MOV64_IMM(BPF_REG_3, 0),
5696 BPF_MOV64_IMM(BPF_REG_4, 0),
5697 BPF_MOV64_IMM(BPF_REG_5, 0),
5698 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5699 BPF_EXIT_INSN(),
5700 },
5701 .fixup_map1 = { 3 },
5702 .result = ACCEPT,
5703 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5704 },
5705 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005706 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08005707 .insns = {
5708 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5711 BPF_LD_MAP_FD(BPF_REG_1, 0),
5712 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5713 BPF_FUNC_map_lookup_elem),
5714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5715 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5716 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
5717 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5719 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5720 BPF_MOV64_IMM(BPF_REG_3, 0),
5721 BPF_MOV64_IMM(BPF_REG_4, 0),
5722 BPF_MOV64_IMM(BPF_REG_5, 0),
5723 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5724 BPF_EXIT_INSN(),
5725 },
5726 .fixup_map1 = { 3 },
5727 .result = ACCEPT,
5728 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5729 },
5730 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005731 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08005732 .insns = {
5733 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5736 BPF_LD_MAP_FD(BPF_REG_1, 0),
5737 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5738 BPF_FUNC_map_lookup_elem),
5739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5741 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5742 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5743 BPF_MOV64_IMM(BPF_REG_3, 0),
5744 BPF_MOV64_IMM(BPF_REG_4, 0),
5745 BPF_MOV64_IMM(BPF_REG_5, 0),
5746 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5747 BPF_EXIT_INSN(),
5748 },
5749 .fixup_map1 = { 3 },
5750 .result = ACCEPT,
5751 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5752 },
5753 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005754 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
Yonghong Songb6ff6392017-11-12 14:49:11 -08005755 .insns = {
5756 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5757 offsetof(struct __sk_buff, data)),
5758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5759 offsetof(struct __sk_buff, data_end)),
5760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
5761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
5763 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5764 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
5765 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5766 BPF_MOV64_IMM(BPF_REG_3, 0),
5767 BPF_MOV64_IMM(BPF_REG_4, 0),
5768 BPF_MOV64_IMM(BPF_REG_5, 0),
5769 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5770 BPF_EXIT_INSN(),
5771 },
5772 .result = ACCEPT,
Gianluca Borello06c1c042017-01-09 10:19:49 -08005773 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5774 },
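/*
 * Descriptive note (added): the same combinations for bpf_probe_read(), whose
 * memory argument is not ..._OR_NULL. NULL is rejected even with size 0
 * ("R1 type=inv expected=fp"), while a non-NULL stack or map pointer with a
 * possibly zero size is accepted.
 */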
5775 {
Gianluca Borellodb1ac492017-11-22 18:32:53 +00005776 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5777 .insns = {
5778 BPF_MOV64_IMM(BPF_REG_1, 0),
5779 BPF_MOV64_IMM(BPF_REG_2, 0),
5780 BPF_MOV64_IMM(BPF_REG_3, 0),
5781 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5782 BPF_EXIT_INSN(),
5783 },
5784 .errstr = "R1 type=inv expected=fp",
5785 .result = REJECT,
5786 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5787 },
5788 {
5789 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5790 .insns = {
5791 BPF_MOV64_IMM(BPF_REG_1, 0),
5792 BPF_MOV64_IMM(BPF_REG_2, 1),
5793 BPF_MOV64_IMM(BPF_REG_3, 0),
5794 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5795 BPF_EXIT_INSN(),
5796 },
5797 .errstr = "R1 type=inv expected=fp",
5798 .result = REJECT,
5799 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5800 },
5801 {
5802 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5803 .insns = {
5804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5806 BPF_MOV64_IMM(BPF_REG_2, 0),
5807 BPF_MOV64_IMM(BPF_REG_3, 0),
5808 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5809 BPF_EXIT_INSN(),
5810 },
5811 .result = ACCEPT,
5812 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5813 },
5814 {
5815 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5816 .insns = {
5817 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5818 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5820 BPF_LD_MAP_FD(BPF_REG_1, 0),
5821 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5823 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5824 BPF_MOV64_IMM(BPF_REG_2, 0),
5825 BPF_MOV64_IMM(BPF_REG_3, 0),
5826 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5827 BPF_EXIT_INSN(),
5828 },
5829 .fixup_map1 = { 3 },
5830 .result = ACCEPT,
5831 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5832 },
5833 {
5834 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5835 .insns = {
5836 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5839 BPF_LD_MAP_FD(BPF_REG_1, 0),
5840 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5842 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5843 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5844 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5846 BPF_MOV64_IMM(BPF_REG_3, 0),
5847 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5848 BPF_EXIT_INSN(),
5849 },
5850 .fixup_map1 = { 3 },
5851 .result = ACCEPT,
5852 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5853 },
5854 {
5855 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5856 .insns = {
5857 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5858 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5860 BPF_LD_MAP_FD(BPF_REG_1, 0),
5861 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5862 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5864 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5865 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
5866 BPF_MOV64_IMM(BPF_REG_3, 0),
5867 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5868 BPF_EXIT_INSN(),
5869 },
5870 .fixup_map1 = { 3 },
5871 .result = ACCEPT,
5872 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5873 },
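/*
 * Descriptive note (added): stack leak checks. The variable-sized
 * bpf_probe_read() window may not cover bytes that were never written. The
 * first test leaves the eight bytes at fp-32 uninitialized and is rejected
 * ("invalid indirect read from stack off -64+32 size 64"); the second
 * initializes the whole 64-byte window first and is accepted.
 */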
5874 {
Gianluca Borello06c1c042017-01-09 10:19:49 -08005875 "helper access to variable memory: 8 bytes leak",
5876 .insns = {
5877 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5879 BPF_MOV64_IMM(BPF_REG_0, 0),
5880 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5881 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5882 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5883 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5884 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5885 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5886 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08005887 BPF_MOV64_IMM(BPF_REG_2, 1),
Daniel Borkmann3fadc802017-01-24 01:06:30 +01005888 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5889 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
Gianluca Borello06c1c042017-01-09 10:19:49 -08005890 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5892 BPF_MOV64_IMM(BPF_REG_3, 0),
5893 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5894 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5895 BPF_EXIT_INSN(),
5896 },
5897 .errstr = "invalid indirect read from stack off -64+32 size 64",
5898 .result = REJECT,
5899 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5900 },
5901 {
5902 "helper access to variable memory: 8 bytes no leak (init memory)",
5903 .insns = {
5904 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5905 BPF_MOV64_IMM(BPF_REG_0, 0),
5906 BPF_MOV64_IMM(BPF_REG_0, 0),
5907 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5908 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5909 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5910 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5911 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5912 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5913 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5914 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5915 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5916 BPF_MOV64_IMM(BPF_REG_2, 0),
5917 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5919 BPF_MOV64_IMM(BPF_REG_3, 0),
5920 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5921 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5922 BPF_EXIT_INSN(),
5923 },
5924 .result = ACCEPT,
5925 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5926 },
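/*
 * Descriptive note (added): two cases where bounds derived from ALU results
 * do not actually confine the offset, an AND with a negative constant and a
 * MOD/AND/RSH/MUL sequence. In both, the final store could land outside
 * struct test_val, so the programs must be rejected with "R0 max value is
 * outside of the array range".
 */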
5927	{
5928 "invalid and of negative number",
5929 .insns = {
5930 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5931 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5933 BPF_LD_MAP_FD(BPF_REG_1, 0),
5934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5935 BPF_FUNC_map_lookup_elem),
5936 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5937	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5938	BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5939 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5940 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5941 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5942 offsetof(struct test_val, foo)),
5943 BPF_EXIT_INSN(),
5944 },
5945 .fixup_map2 = { 3 },
5946	.errstr = "R0 max value is outside of the array range",
5947	.result = REJECT,
5948	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5949	},
5950 {
5951 "invalid range check",
5952 .insns = {
5953 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5954 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5956 BPF_LD_MAP_FD(BPF_REG_1, 0),
5957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5958 BPF_FUNC_map_lookup_elem),
5959 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5960 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5961 BPF_MOV64_IMM(BPF_REG_9, 1),
5962 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5963 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5964 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5965 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5966 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5967 BPF_MOV32_IMM(BPF_REG_3, 1),
5968 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5969 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5970 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5971 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5972 BPF_MOV64_REG(BPF_REG_0, 0),
5973 BPF_EXIT_INSN(),
5974 },
5975 .fixup_map2 = { 3 },
5976	.errstr = "R0 max value is outside of the array range",
5977	.result = REJECT,
5978	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5979	},
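	/* Map-in-map tests: the inner map pointer returned by the first
	 * bpf_map_lookup_elem() may only be used as-is and only after a
	 * NULL check; modifying it or skipping the check must be rejected.
	 */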
5980 {
5981 "map in map access",
5982 .insns = {
5983 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5984 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5986 BPF_LD_MAP_FD(BPF_REG_1, 0),
5987 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5988 BPF_FUNC_map_lookup_elem),
5989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5990 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5993 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5995 BPF_FUNC_map_lookup_elem),
5996 BPF_MOV64_REG(BPF_REG_0, 0),
5997 BPF_EXIT_INSN(),
5998 },
5999 .fixup_map_in_map = { 3 },
6000 .result = ACCEPT,
6001 },
6002 {
6003 "invalid inner map pointer",
6004 .insns = {
6005 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6006 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6008 BPF_LD_MAP_FD(BPF_REG_1, 0),
6009 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6010 BPF_FUNC_map_lookup_elem),
6011 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6012 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6018 BPF_FUNC_map_lookup_elem),
6019 BPF_MOV64_REG(BPF_REG_0, 0),
6020 BPF_EXIT_INSN(),
6021 },
6022 .fixup_map_in_map = { 3 },
6023 .errstr = "R1 type=inv expected=map_ptr",
6024	.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6025	.result = REJECT,
6026 },
6027 {
6028 "forgot null checking on the inner map pointer",
6029 .insns = {
6030 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6031 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6033 BPF_LD_MAP_FD(BPF_REG_1, 0),
6034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6035 BPF_FUNC_map_lookup_elem),
6036 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
6037 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6039 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6040 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6041 BPF_FUNC_map_lookup_elem),
6042 BPF_MOV64_REG(BPF_REG_0, 0),
6043 BPF_EXIT_INSN(),
6044 },
6045 .fixup_map_in_map = { 3 },
6046 .errstr = "R1 type=map_value_or_null expected=map_ptr",
6047 .result = REJECT,
6048	},
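	/* LD_ABS/LD_IND behave like calls and clobber the caller-saved
	 * registers R1-R5, so reading any of them afterwards must fail with
	 * "!read_ok"; callee-saved registers such as R7 keep their value.
	 */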
6049 {
6050 "ld_abs: check calling conv, r1",
6051 .insns = {
6052 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6053 BPF_MOV64_IMM(BPF_REG_1, 0),
6054 BPF_LD_ABS(BPF_W, -0x200000),
6055 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6056 BPF_EXIT_INSN(),
6057 },
6058 .errstr = "R1 !read_ok",
6059 .result = REJECT,
6060 },
6061 {
6062 "ld_abs: check calling conv, r2",
6063 .insns = {
6064 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6065 BPF_MOV64_IMM(BPF_REG_2, 0),
6066 BPF_LD_ABS(BPF_W, -0x200000),
6067 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6068 BPF_EXIT_INSN(),
6069 },
6070 .errstr = "R2 !read_ok",
6071 .result = REJECT,
6072 },
6073 {
6074 "ld_abs: check calling conv, r3",
6075 .insns = {
6076 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6077 BPF_MOV64_IMM(BPF_REG_3, 0),
6078 BPF_LD_ABS(BPF_W, -0x200000),
6079 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6080 BPF_EXIT_INSN(),
6081 },
6082 .errstr = "R3 !read_ok",
6083 .result = REJECT,
6084 },
6085 {
6086 "ld_abs: check calling conv, r4",
6087 .insns = {
6088 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6089 BPF_MOV64_IMM(BPF_REG_4, 0),
6090 BPF_LD_ABS(BPF_W, -0x200000),
6091 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6092 BPF_EXIT_INSN(),
6093 },
6094 .errstr = "R4 !read_ok",
6095 .result = REJECT,
6096 },
6097 {
6098 "ld_abs: check calling conv, r5",
6099 .insns = {
6100 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6101 BPF_MOV64_IMM(BPF_REG_5, 0),
6102 BPF_LD_ABS(BPF_W, -0x200000),
6103 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6104 BPF_EXIT_INSN(),
6105 },
6106 .errstr = "R5 !read_ok",
6107 .result = REJECT,
6108 },
6109 {
6110 "ld_abs: check calling conv, r7",
6111 .insns = {
6112 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6113 BPF_MOV64_IMM(BPF_REG_7, 0),
6114 BPF_LD_ABS(BPF_W, -0x200000),
6115 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6116 BPF_EXIT_INSN(),
6117 },
6118 .result = ACCEPT,
6119 },
6120 {
6121 "ld_ind: check calling conv, r1",
6122 .insns = {
6123 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6124 BPF_MOV64_IMM(BPF_REG_1, 1),
6125 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
6126 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
6127 BPF_EXIT_INSN(),
6128 },
6129 .errstr = "R1 !read_ok",
6130 .result = REJECT,
6131 },
6132 {
6133 "ld_ind: check calling conv, r2",
6134 .insns = {
6135 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6136 BPF_MOV64_IMM(BPF_REG_2, 1),
6137 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
6138 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6139 BPF_EXIT_INSN(),
6140 },
6141 .errstr = "R2 !read_ok",
6142 .result = REJECT,
6143 },
6144 {
6145 "ld_ind: check calling conv, r3",
6146 .insns = {
6147 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6148 BPF_MOV64_IMM(BPF_REG_3, 1),
6149 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
6150 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6151 BPF_EXIT_INSN(),
6152 },
6153 .errstr = "R3 !read_ok",
6154 .result = REJECT,
6155 },
6156 {
6157 "ld_ind: check calling conv, r4",
6158 .insns = {
6159 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6160 BPF_MOV64_IMM(BPF_REG_4, 1),
6161 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
6162 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6163 BPF_EXIT_INSN(),
6164 },
6165 .errstr = "R4 !read_ok",
6166 .result = REJECT,
6167 },
6168 {
6169 "ld_ind: check calling conv, r5",
6170 .insns = {
6171 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6172 BPF_MOV64_IMM(BPF_REG_5, 1),
6173 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
6174 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
6175 BPF_EXIT_INSN(),
6176 },
6177 .errstr = "R5 !read_ok",
6178 .result = REJECT,
6179 },
6180 {
6181 "ld_ind: check calling conv, r7",
6182 .insns = {
6183 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6184 BPF_MOV64_IMM(BPF_REG_7, 1),
6185 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
6186 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
6187 BPF_EXIT_INSN(),
6188 },
6189 .result = ACCEPT,
6190 },
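	/* Partial loads of bpf_perf_event_data->sample_period (byte, half,
	 * word and dword) are permitted; the #if picks the offset of the
	 * corresponding low-order bytes on big-endian hosts.
	 */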
6191	{
6192 "check bpf_perf_event_data->sample_period byte load permitted",
6193 .insns = {
6194 BPF_MOV64_IMM(BPF_REG_0, 0),
6195 #if __BYTE_ORDER == __LITTLE_ENDIAN
6196	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6197	offsetof(struct bpf_perf_event_data, sample_period)),
6198 #else
6199	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6200	offsetof(struct bpf_perf_event_data, sample_period) + 7),
6201 #endif
6202 BPF_EXIT_INSN(),
6203 },
6204 .result = ACCEPT,
6205 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6206 },
6207 {
6208 "check bpf_perf_event_data->sample_period half load permitted",
6209 .insns = {
6210 BPF_MOV64_IMM(BPF_REG_0, 0),
6211 #if __BYTE_ORDER == __LITTLE_ENDIAN
6212	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6213	offsetof(struct bpf_perf_event_data, sample_period)),
6214 #else
6215	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6216	offsetof(struct bpf_perf_event_data, sample_period) + 6),
6217 #endif
6218 BPF_EXIT_INSN(),
6219 },
6220 .result = ACCEPT,
6221 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6222 },
6223 {
6224 "check bpf_perf_event_data->sample_period word load permitted",
6225 .insns = {
6226 BPF_MOV64_IMM(BPF_REG_0, 0),
6227 #if __BYTE_ORDER == __LITTLE_ENDIAN
6228	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6229	offsetof(struct bpf_perf_event_data, sample_period)),
6230 #else
6231	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6232	offsetof(struct bpf_perf_event_data, sample_period) + 4),
6233 #endif
6234 BPF_EXIT_INSN(),
6235 },
6236 .result = ACCEPT,
6237 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6238 },
6239 {
6240 "check bpf_perf_event_data->sample_period dword load permitted",
6241 .insns = {
6242 BPF_MOV64_IMM(BPF_REG_0, 0),
6243 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6244 offsetof(struct bpf_perf_event_data, sample_period)),
6245 BPF_EXIT_INSN(),
6246 },
6247 .result = ACCEPT,
6248 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6249 },
6250 {
6251 "check skb->data half load not permitted",
6252 .insns = {
6253 BPF_MOV64_IMM(BPF_REG_0, 0),
6254 #if __BYTE_ORDER == __LITTLE_ENDIAN
6255	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6256	offsetof(struct __sk_buff, data)),
6257 #else
6258	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6259	offsetof(struct __sk_buff, data) + 2),
6260 #endif
6261 BPF_EXIT_INSN(),
6262 },
6263 .result = REJECT,
6264 .errstr = "invalid bpf_context access",
6265 },
6266 {
6267 "check skb->tc_classid half load not permitted for lwt prog",
6268 .insns = {
6269 BPF_MOV64_IMM(BPF_REG_0, 0),
6270 #if __BYTE_ORDER == __LITTLE_ENDIAN
6271	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6272	offsetof(struct __sk_buff, tc_classid)),
6273 #else
6274	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6275	offsetof(struct __sk_buff, tc_classid) + 2),
6276 #endif
6277 BPF_EXIT_INSN(),
6278 },
6279 .result = REJECT,
6280 .errstr = "invalid bpf_context access",
6281 .prog_type = BPF_PROG_TYPE_LWT_IN,
6282 },
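	/* The "mixing signed and unsigned" series reloads a value from the
	 * stack that the verifier tracks as unknown, then combines unsigned
	 * (JGT/JGE) and signed (JSGT) comparisons before using it as a map
	 * value offset.  Only the variants whose checks really pin the value
	 * into a small non-negative range are accepted; the rest must be
	 * rejected, typically with "R0 min value is negative".
	 */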
6283	{
6284 "bounds checks mixing signed and unsigned, positive bounds",
6285 .insns = {
6286 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6287 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6289 BPF_LD_MAP_FD(BPF_REG_1, 0),
6290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6291 BPF_FUNC_map_lookup_elem),
6292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6293 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6294 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6295 BPF_MOV64_IMM(BPF_REG_2, 2),
6296 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6297 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6298 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6299 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6300 BPF_MOV64_IMM(BPF_REG_0, 0),
6301 BPF_EXIT_INSN(),
6302 },
6303 .fixup_map1 = { 3 },
6304	.errstr = "R0 min value is negative",
6305	.result = REJECT,
6306	},
6307 {
6308 "bounds checks mixing signed and unsigned",
6309 .insns = {
6310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6313 BPF_LD_MAP_FD(BPF_REG_1, 0),
6314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6315 BPF_FUNC_map_lookup_elem),
6316 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6317 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6318 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6319 BPF_MOV64_IMM(BPF_REG_2, -1),
6320 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6321 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6322 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6323 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6324 BPF_MOV64_IMM(BPF_REG_0, 0),
6325 BPF_EXIT_INSN(),
6326 },
6327 .fixup_map1 = { 3 },
6328	.errstr = "R0 min value is negative",
6329	.result = REJECT,
6330	},
6331	{
6332 "bounds checks mixing signed and unsigned, variant 2",
6333 .insns = {
6334 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6337 BPF_LD_MAP_FD(BPF_REG_1, 0),
6338 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6339 BPF_FUNC_map_lookup_elem),
6340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6341 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6342 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6343 BPF_MOV64_IMM(BPF_REG_2, -1),
6344 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6345 BPF_MOV64_IMM(BPF_REG_8, 0),
6346 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6347 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6348 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6349 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6350 BPF_MOV64_IMM(BPF_REG_0, 0),
6351 BPF_EXIT_INSN(),
6352 },
6353 .fixup_map1 = { 3 },
6354	.errstr = "R8 invalid mem access 'inv'",
6355	.result = REJECT,
6356	},
6357 {
6358 "bounds checks mixing signed and unsigned, variant 3",
6359 .insns = {
6360 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6361 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6362 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6363 BPF_LD_MAP_FD(BPF_REG_1, 0),
6364 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6365 BPF_FUNC_map_lookup_elem),
6366 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6367 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6368 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6369 BPF_MOV64_IMM(BPF_REG_2, -1),
6370 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6371 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6372 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6373 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6374 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6375 BPF_MOV64_IMM(BPF_REG_0, 0),
6376 BPF_EXIT_INSN(),
6377 },
6378 .fixup_map1 = { 3 },
6379	.errstr = "R8 invalid mem access 'inv'",
6380	.result = REJECT,
6381	},
6382 {
6383 "bounds checks mixing signed and unsigned, variant 4",
6384 .insns = {
6385 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6388 BPF_LD_MAP_FD(BPF_REG_1, 0),
6389 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6390 BPF_FUNC_map_lookup_elem),
6391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6392 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6393 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6394 BPF_MOV64_IMM(BPF_REG_2, 1),
6395 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6396 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6397 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6398 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6399 BPF_MOV64_IMM(BPF_REG_0, 0),
6400 BPF_EXIT_INSN(),
6401 },
6402 .fixup_map1 = { 3 },
6403	.result = ACCEPT,
6404	},
6405 {
6406 "bounds checks mixing signed and unsigned, variant 5",
6407 .insns = {
6408 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6409 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6411 BPF_LD_MAP_FD(BPF_REG_1, 0),
6412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6413 BPF_FUNC_map_lookup_elem),
6414 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6416 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6417 BPF_MOV64_IMM(BPF_REG_2, -1),
6418 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6419 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6421 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6422 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6423 BPF_MOV64_IMM(BPF_REG_0, 0),
6424 BPF_EXIT_INSN(),
6425 },
6426 .fixup_map1 = { 3 },
6427	.errstr = "R0 min value is negative",
6428	.result = REJECT,
6429	},
6430 {
6431 "bounds checks mixing signed and unsigned, variant 6",
6432 .insns = {
6433 BPF_MOV64_IMM(BPF_REG_2, 0),
6434 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6435 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6436 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6437 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6438 BPF_MOV64_IMM(BPF_REG_6, -1),
6439 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6440 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6442 BPF_MOV64_IMM(BPF_REG_5, 0),
6443 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6445 BPF_FUNC_skb_load_bytes),
6446 BPF_MOV64_IMM(BPF_REG_0, 0),
6447 BPF_EXIT_INSN(),
6448 },
6449	.errstr = "R4 min value is negative, either use unsigned",
6450	.result = REJECT,
6451	},
6452 {
6453 "bounds checks mixing signed and unsigned, variant 7",
6454 .insns = {
6455 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6458 BPF_LD_MAP_FD(BPF_REG_1, 0),
6459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6460 BPF_FUNC_map_lookup_elem),
6461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6462 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6463 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6464 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6465 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6466 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6467 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6468 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6469 BPF_MOV64_IMM(BPF_REG_0, 0),
6470 BPF_EXIT_INSN(),
6471 },
6472 .fixup_map1 = { 3 },
6473	.result = ACCEPT,
6474	},
6475 {
6476 "bounds checks mixing signed and unsigned, variant 8",
6477 .insns = {
6478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6481 BPF_LD_MAP_FD(BPF_REG_1, 0),
6482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6483 BPF_FUNC_map_lookup_elem),
6484	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6486 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6487 BPF_MOV64_IMM(BPF_REG_2, -1),
6488 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6489 BPF_MOV64_IMM(BPF_REG_0, 0),
6490 BPF_EXIT_INSN(),
6491 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6492 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6493 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6494 BPF_MOV64_IMM(BPF_REG_0, 0),
6495 BPF_EXIT_INSN(),
6496 },
6497 .fixup_map1 = { 3 },
6498	.errstr = "R0 min value is negative",
6499	.result = REJECT,
6500	},
6501 {
6502	"bounds checks mixing signed and unsigned, variant 9",
6503	.insns = {
6504 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6505 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6507 BPF_LD_MAP_FD(BPF_REG_1, 0),
6508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6509 BPF_FUNC_map_lookup_elem),
6510 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6511 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6512 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6513 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6514 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6515 BPF_MOV64_IMM(BPF_REG_0, 0),
6516 BPF_EXIT_INSN(),
6517 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6518 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6519 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6520 BPF_MOV64_IMM(BPF_REG_0, 0),
6521 BPF_EXIT_INSN(),
6522 },
6523 .fixup_map1 = { 3 },
6524	.result = ACCEPT,
6525	},
6526 {
6527	"bounds checks mixing signed and unsigned, variant 10",
6528	.insns = {
6529 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6532 BPF_LD_MAP_FD(BPF_REG_1, 0),
6533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6534 BPF_FUNC_map_lookup_elem),
6535 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6537 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6538 BPF_MOV64_IMM(BPF_REG_2, 0),
6539 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6540 BPF_MOV64_IMM(BPF_REG_0, 0),
6541 BPF_EXIT_INSN(),
6542 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6543 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6544 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6545 BPF_MOV64_IMM(BPF_REG_0, 0),
6546 BPF_EXIT_INSN(),
6547 },
6548 .fixup_map1 = { 3 },
6549	.errstr = "R0 min value is negative",
6550	.result = REJECT,
6551	},
6552 {
6553	"bounds checks mixing signed and unsigned, variant 11",
6554	.insns = {
6555 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6558 BPF_LD_MAP_FD(BPF_REG_1, 0),
6559 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6560 BPF_FUNC_map_lookup_elem),
6561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6562 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6563 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6564 BPF_MOV64_IMM(BPF_REG_2, -1),
6565 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6566 /* Dead branch. */
6567 BPF_MOV64_IMM(BPF_REG_0, 0),
6568 BPF_EXIT_INSN(),
6569 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6570 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6571 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6572 BPF_MOV64_IMM(BPF_REG_0, 0),
6573 BPF_EXIT_INSN(),
6574 },
6575 .fixup_map1 = { 3 },
6576	.errstr = "R0 min value is negative",
6577	.result = REJECT,
6578	},
6579 {
6580	"bounds checks mixing signed and unsigned, variant 12",
6581	.insns = {
6582 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6585 BPF_LD_MAP_FD(BPF_REG_1, 0),
6586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6587 BPF_FUNC_map_lookup_elem),
6588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6589 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6590 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6591 BPF_MOV64_IMM(BPF_REG_2, -6),
6592 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6593 BPF_MOV64_IMM(BPF_REG_0, 0),
6594 BPF_EXIT_INSN(),
6595 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6596 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6597 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6598 BPF_MOV64_IMM(BPF_REG_0, 0),
6599 BPF_EXIT_INSN(),
6600 },
6601 .fixup_map1 = { 3 },
6602	.errstr = "R0 min value is negative",
6603	.result = REJECT,
6604	},
6605 {
6606	"bounds checks mixing signed and unsigned, variant 13",
6607	.insns = {
6608 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6609 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6611 BPF_LD_MAP_FD(BPF_REG_1, 0),
6612 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6613 BPF_FUNC_map_lookup_elem),
6614 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6615 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6616 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6617 BPF_MOV64_IMM(BPF_REG_2, 2),
6618 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6619 BPF_MOV64_IMM(BPF_REG_7, 1),
6620 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6621 BPF_MOV64_IMM(BPF_REG_0, 0),
6622 BPF_EXIT_INSN(),
6623 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6624 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6625 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6626 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6627 BPF_MOV64_IMM(BPF_REG_0, 0),
6628 BPF_EXIT_INSN(),
6629 },
6630 .fixup_map1 = { 3 },
6631	.errstr = "R0 min value is negative",
6632	.result = REJECT,
6633	},
6634 {
6635	"bounds checks mixing signed and unsigned, variant 14",
6636	.insns = {
6637 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6638 offsetof(struct __sk_buff, mark)),
6639 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6640 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6641 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6642 BPF_LD_MAP_FD(BPF_REG_1, 0),
6643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6644 BPF_FUNC_map_lookup_elem),
6645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6646 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6647 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6648 BPF_MOV64_IMM(BPF_REG_2, -1),
6649 BPF_MOV64_IMM(BPF_REG_8, 2),
6650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6651 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6652 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6653 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6654 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6655 BPF_MOV64_IMM(BPF_REG_0, 0),
6656 BPF_EXIT_INSN(),
6657 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6658 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6659 },
6660 .fixup_map1 = { 4 },
6661	.errstr = "R0 min value is negative",
6662	.result = REJECT,
6663	},
6664 {
6665	"bounds checks mixing signed and unsigned, variant 15",
6666	.insns = {
6667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6670 BPF_LD_MAP_FD(BPF_REG_1, 0),
6671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6672 BPF_FUNC_map_lookup_elem),
6673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6674 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6675 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6676 BPF_MOV64_IMM(BPF_REG_2, -6),
6677 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6678 BPF_MOV64_IMM(BPF_REG_0, 0),
6679 BPF_EXIT_INSN(),
6680 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6681 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6682 BPF_MOV64_IMM(BPF_REG_0, 0),
6683 BPF_EXIT_INSN(),
6684 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6685 BPF_MOV64_IMM(BPF_REG_0, 0),
6686 BPF_EXIT_INSN(),
6687 },
6688 .fixup_map1 = { 3 },
6689	.errstr_unpriv = "R0 pointer comparison prohibited",
6690	.errstr = "R0 min value is negative",
6691 .result = REJECT,
6692 .result_unpriv = REJECT,
6693 },
6694	{
6695	"subtraction bounds (map value) variant 1",
6696	.insns = {
6697 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6700 BPF_LD_MAP_FD(BPF_REG_1, 0),
6701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6702 BPF_FUNC_map_lookup_elem),
6703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6704 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6705 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6706 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6707 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6708 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6709 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6710 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6711 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6712 BPF_EXIT_INSN(),
6713 BPF_MOV64_IMM(BPF_REG_0, 0),
6714 BPF_EXIT_INSN(),
6715 },
6716 .fixup_map1 = { 3 },
6717	.errstr = "R0 max value is outside of the array range",
6718 .result = REJECT,
6719 },
6720 {
6721 "subtraction bounds (map value) variant 2",
6722 .insns = {
6723 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6724 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6726 BPF_LD_MAP_FD(BPF_REG_1, 0),
6727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6728 BPF_FUNC_map_lookup_elem),
6729 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6730 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6731 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6732 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6733 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6734 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6735 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6736 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6737 BPF_EXIT_INSN(),
6738 BPF_MOV64_IMM(BPF_REG_0, 0),
6739 BPF_EXIT_INSN(),
6740 },
6741 .fixup_map1 = { 3 },
6742	.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6743	.result = REJECT,
6744	},
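	/* Variable-offset pointers: adding an unknown (even bounded and
	 * aligned) value to a ctx or stack pointer and dereferencing the
	 * result is rejected in the two tests that follow.
	 */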
6745	{
6746 "variable-offset ctx access",
6747 .insns = {
6748 /* Get an unknown value */
6749 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6750 /* Make it small and 4-byte aligned */
6751 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6752 /* add it to skb. We now have either &skb->len or
6753 * &skb->pkt_type, but we don't know which
6754 */
6755 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6756 /* dereference it */
6757 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6758 BPF_EXIT_INSN(),
6759 },
6760 .errstr = "variable ctx access var_off=(0x0; 0x4)",
6761 .result = REJECT,
6762 .prog_type = BPF_PROG_TYPE_LWT_IN,
6763 },
6764 {
6765 "variable-offset stack access",
6766 .insns = {
6767 /* Fill the top 8 bytes of the stack */
6768 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6769 /* Get an unknown value */
6770 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6771 /* Make it small and 4-byte aligned */
6772 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6773 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6774 /* add it to fp. We now have either fp-4 or fp-8, but
6775 * we don't know which
6776 */
6777 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6778 /* dereference it */
6779 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6780 BPF_EXIT_INSN(),
6781 },
6782 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6783 .result = REJECT,
6784 .prog_type = BPF_PROG_TYPE_LWT_IN,
6785 },
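	/* Both writes to R0 below sit behind JGE-against-0 checks, so there
	 * is a path to the exit on which R0 was never written; pruning with
	 * write screening must not hide that, and the program has to be
	 * rejected with "R0 !read_ok".
	 */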
6786	{
6787 "liveness pruning and write screening",
6788 .insns = {
6789 /* Get an unknown value */
6790 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6791 /* branch conditions teach us nothing about R2 */
6792 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6793 BPF_MOV64_IMM(BPF_REG_0, 0),
6794 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6795 BPF_MOV64_IMM(BPF_REG_0, 0),
6796 BPF_EXIT_INSN(),
6797 },
6798 .errstr = "R0 !read_ok",
6799 .result = REJECT,
6800 .prog_type = BPF_PROG_TYPE_LWT_IN,
6801 },
6802	{
6803 "varlen_map_value_access pruning",
6804 .insns = {
6805 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6806 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6808 BPF_LD_MAP_FD(BPF_REG_1, 0),
6809 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6810 BPF_FUNC_map_lookup_elem),
6811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6812 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6813 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
6814 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
6815 BPF_MOV32_IMM(BPF_REG_1, 0),
6816 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
6817 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6818 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
6819 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6820 offsetof(struct test_val, foo)),
6821 BPF_EXIT_INSN(),
6822 },
6823 .fixup_map2 = { 3 },
6824 .errstr_unpriv = "R0 leaks addr",
6825 .errstr = "R0 unbounded memory access",
6826 .result_unpriv = REJECT,
6827 .result = REJECT,
6828 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6829 },
6830	{
6831 "invalid 64-bit BPF_END",
6832 .insns = {
6833 BPF_MOV32_IMM(BPF_REG_0, 0),
6834 {
6835 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
6836 .dst_reg = BPF_REG_0,
6837 .src_reg = 0,
6838 .off = 0,
6839 .imm = 32,
6840 },
6841 BPF_EXIT_INSN(),
6842 },
6843 .errstr = "BPF_END uses reserved fields",
6844 .result = REJECT,
6845 },
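	/* XDP data_meta tests: the metadata area lies in front of the packet
	 * data, so meta pointers must be bounds-checked against data (not
	 * data_end) before being dereferenced, and calling
	 * bpf_xdp_adjust_meta() invalidates previously derived pointers.
	 */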
6846	{
6847 "meta access, test1",
6848 .insns = {
6849 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6850 offsetof(struct xdp_md, data_meta)),
6851 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6852 offsetof(struct xdp_md, data)),
6853 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6854 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6855 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
6856 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6857 BPF_MOV64_IMM(BPF_REG_0, 0),
6858 BPF_EXIT_INSN(),
6859 },
6860 .result = ACCEPT,
6861 .prog_type = BPF_PROG_TYPE_XDP,
6862 },
6863 {
6864 "meta access, test2",
6865 .insns = {
6866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6867 offsetof(struct xdp_md, data_meta)),
6868 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6869 offsetof(struct xdp_md, data)),
6870 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6871 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
6872 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
6873 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
6874 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
6875 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6876 BPF_MOV64_IMM(BPF_REG_0, 0),
6877 BPF_EXIT_INSN(),
6878 },
6879 .result = REJECT,
6880 .errstr = "invalid access to packet, off=-8",
6881 .prog_type = BPF_PROG_TYPE_XDP,
6882 },
6883 {
6884 "meta access, test3",
6885 .insns = {
6886 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6887 offsetof(struct xdp_md, data_meta)),
6888 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6889 offsetof(struct xdp_md, data_end)),
6890 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
6891 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6892 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
6893 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6894 BPF_MOV64_IMM(BPF_REG_0, 0),
6895 BPF_EXIT_INSN(),
6896 },
6897 .result = REJECT,
6898 .errstr = "invalid access to packet",
6899 .prog_type = BPF_PROG_TYPE_XDP,
6900 },
6901 {
6902 "meta access, test4",
6903 .insns = {
6904 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6905 offsetof(struct xdp_md, data_meta)),
6906 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6907 offsetof(struct xdp_md, data_end)),
6908 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
6909 offsetof(struct xdp_md, data)),
6910 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
6911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6912 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
6913 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6914 BPF_MOV64_IMM(BPF_REG_0, 0),
6915 BPF_EXIT_INSN(),
6916 },
6917 .result = REJECT,
6918 .errstr = "invalid access to packet",
6919 .prog_type = BPF_PROG_TYPE_XDP,
6920 },
6921 {
6922 "meta access, test5",
6923 .insns = {
6924 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6925 offsetof(struct xdp_md, data_meta)),
6926 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
6927 offsetof(struct xdp_md, data)),
6928 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6930 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
6931 BPF_MOV64_IMM(BPF_REG_2, -8),
6932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6933 BPF_FUNC_xdp_adjust_meta),
6934 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
6935 BPF_MOV64_IMM(BPF_REG_0, 0),
6936 BPF_EXIT_INSN(),
6937 },
6938 .result = REJECT,
6939 .errstr = "R3 !read_ok",
6940 .prog_type = BPF_PROG_TYPE_XDP,
6941 },
6942 {
6943 "meta access, test6",
6944 .insns = {
6945 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6946 offsetof(struct xdp_md, data_meta)),
6947 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6948 offsetof(struct xdp_md, data)),
6949 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6951 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
6952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
6953 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
6954 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6955 BPF_MOV64_IMM(BPF_REG_0, 0),
6956 BPF_EXIT_INSN(),
6957 },
6958 .result = REJECT,
6959 .errstr = "invalid access to packet",
6960 .prog_type = BPF_PROG_TYPE_XDP,
6961 },
6962 {
6963 "meta access, test7",
6964 .insns = {
6965 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6966 offsetof(struct xdp_md, data_meta)),
6967 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6968 offsetof(struct xdp_md, data)),
6969 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
6970 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
6971 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
6972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
6973 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
6974 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6975 BPF_MOV64_IMM(BPF_REG_0, 0),
6976 BPF_EXIT_INSN(),
6977 },
6978 .result = ACCEPT,
6979 .prog_type = BPF_PROG_TYPE_XDP,
6980 },
6981 {
6982 "meta access, test8",
6983 .insns = {
6984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6985 offsetof(struct xdp_md, data_meta)),
6986 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6987 offsetof(struct xdp_md, data)),
6988 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
6989 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
6990 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
6991 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
6992 BPF_MOV64_IMM(BPF_REG_0, 0),
6993 BPF_EXIT_INSN(),
6994 },
6995 .result = ACCEPT,
6996 .prog_type = BPF_PROG_TYPE_XDP,
6997 },
6998 {
6999 "meta access, test9",
7000 .insns = {
7001 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7002 offsetof(struct xdp_md, data_meta)),
7003 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7004 offsetof(struct xdp_md, data)),
7005 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7006 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7008 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7009 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7010 BPF_MOV64_IMM(BPF_REG_0, 0),
7011 BPF_EXIT_INSN(),
7012 },
7013 .result = REJECT,
7014 .errstr = "invalid access to packet",
7015 .prog_type = BPF_PROG_TYPE_XDP,
7016 },
7017 {
7018 "meta access, test10",
7019 .insns = {
7020 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7021 offsetof(struct xdp_md, data_meta)),
7022 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7023 offsetof(struct xdp_md, data)),
7024 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7025 offsetof(struct xdp_md, data_end)),
7026 BPF_MOV64_IMM(BPF_REG_5, 42),
7027 BPF_MOV64_IMM(BPF_REG_6, 24),
7028 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7029 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7030 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7031 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7032 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
7033 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7034 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7036 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
7037 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
7038 BPF_MOV64_IMM(BPF_REG_0, 0),
7039 BPF_EXIT_INSN(),
7040 },
7041 .result = REJECT,
7042 .errstr = "invalid access to packet",
7043 .prog_type = BPF_PROG_TYPE_XDP,
7044 },
7045 {
7046 "meta access, test11",
7047 .insns = {
7048 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7049 offsetof(struct xdp_md, data_meta)),
7050 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7051 offsetof(struct xdp_md, data)),
7052 BPF_MOV64_IMM(BPF_REG_5, 42),
7053 BPF_MOV64_IMM(BPF_REG_6, 24),
7054 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7055 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7056 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7057 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7058 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
7059 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7060 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7061 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7062 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
7063 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
7064 BPF_MOV64_IMM(BPF_REG_0, 0),
7065 BPF_EXIT_INSN(),
7066 },
7067 .result = ACCEPT,
7068 .prog_type = BPF_PROG_TYPE_XDP,
7069 },
7070 {
7071 "meta access, test12",
7072 .insns = {
7073 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7074 offsetof(struct xdp_md, data_meta)),
7075 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7076 offsetof(struct xdp_md, data)),
7077 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7078 offsetof(struct xdp_md, data_end)),
7079 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7080 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7081 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
7082 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7083 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7085 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
7086 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7087 BPF_MOV64_IMM(BPF_REG_0, 0),
7088 BPF_EXIT_INSN(),
7089 },
7090 .result = ACCEPT,
7091 .prog_type = BPF_PROG_TYPE_XDP,
7092 },
7093	{
7094	"arithmetic ops make PTR_TO_CTX unusable",
7095 .insns = {
7096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
7097 offsetof(struct __sk_buff, data) -
7098 offsetof(struct __sk_buff, mark)),
7099 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7100 offsetof(struct __sk_buff, mark)),
7101 BPF_EXIT_INSN(),
7102 },
7103 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
7104 .result = REJECT,
7105 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7106 },
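	/* XDP packet-pointer comparison matrix: the first two tests show
	 * that mangling pkt_end before the check defeats it, and the rest
	 * walk every comparison direction and branch order of pkt_data'
	 * vs. pkt_end (and later pkt_meta' vs. pkt_data), verifying which
	 * loads the proven bounds permit.
	 */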
7107	{
7108 "XDP pkt read, pkt_end mangling, bad access 1",
7109 .insns = {
7110 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7111 offsetof(struct xdp_md, data)),
7112 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7113 offsetof(struct xdp_md, data_end)),
7114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
7117 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7118 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7119 BPF_MOV64_IMM(BPF_REG_0, 0),
7120 BPF_EXIT_INSN(),
7121 },
7122 .errstr = "R1 offset is outside of the packet",
7123 .result = REJECT,
7124 .prog_type = BPF_PROG_TYPE_XDP,
7125 },
7126 {
7127 "XDP pkt read, pkt_end mangling, bad access 2",
7128 .insns = {
7129 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7130 offsetof(struct xdp_md, data)),
7131 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7132 offsetof(struct xdp_md, data_end)),
7133 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7135 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
7136 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7137 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7138 BPF_MOV64_IMM(BPF_REG_0, 0),
7139 BPF_EXIT_INSN(),
7140 },
7141 .errstr = "R1 offset is outside of the packet",
7142 .result = REJECT,
7143 .prog_type = BPF_PROG_TYPE_XDP,
7144 },
7145 {
7146 "XDP pkt read, pkt_data' > pkt_end, good access",
7147 .insns = {
7148 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7149 offsetof(struct xdp_md, data)),
7150 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7151 offsetof(struct xdp_md, data_end)),
7152 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7154 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7155 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7156 BPF_MOV64_IMM(BPF_REG_0, 0),
7157 BPF_EXIT_INSN(),
7158 },
7159 .result = ACCEPT,
7160 .prog_type = BPF_PROG_TYPE_XDP,
7161 },
7162 {
7163 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7164 .insns = {
7165 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7166 offsetof(struct xdp_md, data)),
7167 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7168 offsetof(struct xdp_md, data_end)),
7169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7171 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7172 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7173 BPF_MOV64_IMM(BPF_REG_0, 0),
7174 BPF_EXIT_INSN(),
7175 },
7176 .errstr = "R1 offset is outside of the packet",
7177 .result = REJECT,
7178 .prog_type = BPF_PROG_TYPE_XDP,
7179 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7180 },
7181 {
7182 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7183 .insns = {
7184 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7185 offsetof(struct xdp_md, data)),
7186 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7187 offsetof(struct xdp_md, data_end)),
7188 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7190 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7191 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7192 BPF_MOV64_IMM(BPF_REG_0, 0),
7193 BPF_EXIT_INSN(),
7194 },
7195 .errstr = "R1 offset is outside of the packet",
7196 .result = REJECT,
7197 .prog_type = BPF_PROG_TYPE_XDP,
7198 },
7199 {
7200 "XDP pkt read, pkt_end > pkt_data', good access",
7201 .insns = {
7202 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7203 offsetof(struct xdp_md, data)),
7204 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7205 offsetof(struct xdp_md, data_end)),
7206 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7208 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7209 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7210 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7211 BPF_MOV64_IMM(BPF_REG_0, 0),
7212 BPF_EXIT_INSN(),
7213 },
7214 .result = ACCEPT,
7215 .prog_type = BPF_PROG_TYPE_XDP,
7216 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7217 },
7218 {
7219 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7220 .insns = {
7221 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7222 offsetof(struct xdp_md, data)),
7223 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7224 offsetof(struct xdp_md, data_end)),
7225 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7227 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7228 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7229 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7230 BPF_MOV64_IMM(BPF_REG_0, 0),
7231 BPF_EXIT_INSN(),
7232 },
7233 .errstr = "R1 offset is outside of the packet",
7234 .result = REJECT,
7235 .prog_type = BPF_PROG_TYPE_XDP,
7236 },
7237 {
7238 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7239 .insns = {
7240 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7241 offsetof(struct xdp_md, data)),
7242 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7243 offsetof(struct xdp_md, data_end)),
7244 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7245 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7246 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7247 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7248 BPF_MOV64_IMM(BPF_REG_0, 0),
7249 BPF_EXIT_INSN(),
7250 },
7251 .errstr = "R1 offset is outside of the packet",
7252 .result = REJECT,
7253 .prog_type = BPF_PROG_TYPE_XDP,
7254 },
7255 {
7256 "XDP pkt read, pkt_data' < pkt_end, good access",
7257 .insns = {
7258 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7259 offsetof(struct xdp_md, data)),
7260 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7261 offsetof(struct xdp_md, data_end)),
7262 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7264 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7265 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7266 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7267 BPF_MOV64_IMM(BPF_REG_0, 0),
7268 BPF_EXIT_INSN(),
7269 },
7270 .result = ACCEPT,
7271 .prog_type = BPF_PROG_TYPE_XDP,
7272 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7273 },
7274 {
7275 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7276 .insns = {
7277 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7278 offsetof(struct xdp_md, data)),
7279 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7280 offsetof(struct xdp_md, data_end)),
7281 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7283 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7284 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7286 BPF_MOV64_IMM(BPF_REG_0, 0),
7287 BPF_EXIT_INSN(),
7288 },
7289 .errstr = "R1 offset is outside of the packet",
7290 .result = REJECT,
7291 .prog_type = BPF_PROG_TYPE_XDP,
7292 },
7293 {
7294 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7295 .insns = {
7296 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7297 offsetof(struct xdp_md, data)),
7298 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7299 offsetof(struct xdp_md, data_end)),
7300 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7302 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7303 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7304 BPF_MOV64_IMM(BPF_REG_0, 0),
7305 BPF_EXIT_INSN(),
7306 },
7307 .errstr = "R1 offset is outside of the packet",
7308 .result = REJECT,
7309 .prog_type = BPF_PROG_TYPE_XDP,
7310 },
7311 {
7312 "XDP pkt read, pkt_end < pkt_data', good access",
7313 .insns = {
7314 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7315 offsetof(struct xdp_md, data)),
7316 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7317 offsetof(struct xdp_md, data_end)),
7318 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7320 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7321 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7322 BPF_MOV64_IMM(BPF_REG_0, 0),
7323 BPF_EXIT_INSN(),
7324 },
7325 .result = ACCEPT,
7326 .prog_type = BPF_PROG_TYPE_XDP,
7327 },
7328 {
7329 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7330 .insns = {
7331 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7332 offsetof(struct xdp_md, data)),
7333 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7334 offsetof(struct xdp_md, data_end)),
7335 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7337 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7338 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7339 BPF_MOV64_IMM(BPF_REG_0, 0),
7340 BPF_EXIT_INSN(),
7341 },
7342 .errstr = "R1 offset is outside of the packet",
7343 .result = REJECT,
7344 .prog_type = BPF_PROG_TYPE_XDP,
7345 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7346 },
7347 {
7348 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7349 .insns = {
7350 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7351 offsetof(struct xdp_md, data)),
7352 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7353 offsetof(struct xdp_md, data_end)),
7354 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7356 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7357 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7358 BPF_MOV64_IMM(BPF_REG_0, 0),
7359 BPF_EXIT_INSN(),
7360 },
7361 .errstr = "R1 offset is outside of the packet",
7362 .result = REJECT,
7363 .prog_type = BPF_PROG_TYPE_XDP,
7364 },
7365 {
7366 "XDP pkt read, pkt_data' >= pkt_end, good access",
7367 .insns = {
7368 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7369 offsetof(struct xdp_md, data)),
7370 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7371 offsetof(struct xdp_md, data_end)),
7372 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7374 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7375 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7376 BPF_MOV64_IMM(BPF_REG_0, 0),
7377 BPF_EXIT_INSN(),
7378 },
7379 .result = ACCEPT,
7380 .prog_type = BPF_PROG_TYPE_XDP,
7381 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7382 },
7383 {
7384 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7385 .insns = {
7386 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7387 offsetof(struct xdp_md, data)),
7388 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7389 offsetof(struct xdp_md, data_end)),
7390 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7392 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7393 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7394 BPF_MOV64_IMM(BPF_REG_0, 0),
7395 BPF_EXIT_INSN(),
7396 },
7397 .errstr = "R1 offset is outside of the packet",
7398 .result = REJECT,
7399 .prog_type = BPF_PROG_TYPE_XDP,
7400 },
7401 {
7402 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7403 .insns = {
7404 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7405 offsetof(struct xdp_md, data)),
7406 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7407 offsetof(struct xdp_md, data_end)),
7408 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7409 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7410 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7411 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7412 BPF_MOV64_IMM(BPF_REG_0, 0),
7413 BPF_EXIT_INSN(),
7414 },
7415 .errstr = "R1 offset is outside of the packet",
7416 .result = REJECT,
7417 .prog_type = BPF_PROG_TYPE_XDP,
7418 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7419 },
7420 {
7421 "XDP pkt read, pkt_end >= pkt_data', good access",
7422 .insns = {
7423 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7424 offsetof(struct xdp_md, data)),
7425 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7426 offsetof(struct xdp_md, data_end)),
7427 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7429 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7430 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7431 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7432 BPF_MOV64_IMM(BPF_REG_0, 0),
7433 BPF_EXIT_INSN(),
7434 },
7435 .result = ACCEPT,
7436 .prog_type = BPF_PROG_TYPE_XDP,
7437 },
7438 {
7439 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7440 .insns = {
7441 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7442 offsetof(struct xdp_md, data)),
7443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7444 offsetof(struct xdp_md, data_end)),
7445 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7447 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7448 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7449 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7450 BPF_MOV64_IMM(BPF_REG_0, 0),
7451 BPF_EXIT_INSN(),
7452 },
7453 .errstr = "R1 offset is outside of the packet",
7454 .result = REJECT,
7455 .prog_type = BPF_PROG_TYPE_XDP,
7456 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7457 },
7458 {
7459 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7460 .insns = {
7461 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7462 offsetof(struct xdp_md, data)),
7463 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7464 offsetof(struct xdp_md, data_end)),
7465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7467 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7468 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7469 BPF_MOV64_IMM(BPF_REG_0, 0),
7470 BPF_EXIT_INSN(),
7471 },
7472 .errstr = "R1 offset is outside of the packet",
7473 .result = REJECT,
7474 .prog_type = BPF_PROG_TYPE_XDP,
7475 },
7476 {
7477 "XDP pkt read, pkt_data' <= pkt_end, good access",
7478 .insns = {
7479 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7480 offsetof(struct xdp_md, data)),
7481 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7482 offsetof(struct xdp_md, data_end)),
7483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7485 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7486 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7488 BPF_MOV64_IMM(BPF_REG_0, 0),
7489 BPF_EXIT_INSN(),
7490 },
7491 .result = ACCEPT,
7492 .prog_type = BPF_PROG_TYPE_XDP,
7493 },
7494 {
7495 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7496 .insns = {
7497 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7498 offsetof(struct xdp_md, data)),
7499 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7500 offsetof(struct xdp_md, data_end)),
7501 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7502 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7503 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7504 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7505 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7506 BPF_MOV64_IMM(BPF_REG_0, 0),
7507 BPF_EXIT_INSN(),
7508 },
7509 .errstr = "R1 offset is outside of the packet",
7510 .result = REJECT,
7511 .prog_type = BPF_PROG_TYPE_XDP,
7512 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7513 },
7514 {
7515 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7516 .insns = {
7517 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7518 offsetof(struct xdp_md, data)),
7519 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7520 offsetof(struct xdp_md, data_end)),
7521 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7522 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7523 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7524 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7525 BPF_MOV64_IMM(BPF_REG_0, 0),
7526 BPF_EXIT_INSN(),
7527 },
7528 .errstr = "R1 offset is outside of the packet",
7529 .result = REJECT,
7530 .prog_type = BPF_PROG_TYPE_XDP,
7531 },
7532 {
7533 "XDP pkt read, pkt_end <= pkt_data', good access",
7534 .insns = {
7535 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7536 offsetof(struct xdp_md, data)),
7537 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7538 offsetof(struct xdp_md, data_end)),
7539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7541 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7542 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7543 BPF_MOV64_IMM(BPF_REG_0, 0),
7544 BPF_EXIT_INSN(),
7545 },
7546 .result = ACCEPT,
7547 .prog_type = BPF_PROG_TYPE_XDP,
7548 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7549 },
7550 {
7551 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7552 .insns = {
7553 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7554 offsetof(struct xdp_md, data)),
7555 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7556 offsetof(struct xdp_md, data_end)),
7557 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7558 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7559 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7560 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7561 BPF_MOV64_IMM(BPF_REG_0, 0),
7562 BPF_EXIT_INSN(),
7563 },
7564 .errstr = "R1 offset is outside of the packet",
7565 .result = REJECT,
7566 .prog_type = BPF_PROG_TYPE_XDP,
7567 },
7568 {
7569 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7570 .insns = {
7571 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7572 offsetof(struct xdp_md, data)),
7573 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7574 offsetof(struct xdp_md, data_end)),
7575 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7577 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7578 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7579 BPF_MOV64_IMM(BPF_REG_0, 0),
7580 BPF_EXIT_INSN(),
7581 },
7582 .errstr = "R1 offset is outside of the packet",
7583 .result = REJECT,
7584 .prog_type = BPF_PROG_TYPE_XDP,
7585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7586 },
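/* The data_meta variants below repeat the same pattern with
 * r2 = data_meta and r3 = data: the metadata area ends where the packet
 * data begins, so data is the upper bound the verifier checks
 * data_meta + 8 against.
 */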
Daniel Borkmannb06723d2017-11-01 23:58:09 +01007587 {
Daniel Borkmann634eab12017-11-01 23:58:11 +01007588 "XDP pkt read, pkt_meta' > pkt_data, good access",
7589 .insns = {
7590 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7591 offsetof(struct xdp_md, data_meta)),
7592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7593 offsetof(struct xdp_md, data)),
7594 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7596 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7597 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7598 BPF_MOV64_IMM(BPF_REG_0, 0),
7599 BPF_EXIT_INSN(),
7600 },
7601 .result = ACCEPT,
7602 .prog_type = BPF_PROG_TYPE_XDP,
7603 },
7604 {
7605 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
7606 .insns = {
7607 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7608 offsetof(struct xdp_md, data_meta)),
7609 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7610 offsetof(struct xdp_md, data)),
7611 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7613 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7614 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7615 BPF_MOV64_IMM(BPF_REG_0, 0),
7616 BPF_EXIT_INSN(),
7617 },
7618 .errstr = "R1 offset is outside of the packet",
7619 .result = REJECT,
7620 .prog_type = BPF_PROG_TYPE_XDP,
7621 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7622 },
7623 {
7624 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
7625 .insns = {
7626 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7627 offsetof(struct xdp_md, data_meta)),
7628 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7629 offsetof(struct xdp_md, data)),
7630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7632 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7633 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7634 BPF_MOV64_IMM(BPF_REG_0, 0),
7635 BPF_EXIT_INSN(),
7636 },
7637 .errstr = "R1 offset is outside of the packet",
7638 .result = REJECT,
7639 .prog_type = BPF_PROG_TYPE_XDP,
7640 },
7641 {
7642 "XDP pkt read, pkt_data > pkt_meta', good access",
7643 .insns = {
7644 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7645 offsetof(struct xdp_md, data_meta)),
7646 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7647 offsetof(struct xdp_md, data)),
7648 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7649 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7650 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7651 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7652 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7653 BPF_MOV64_IMM(BPF_REG_0, 0),
7654 BPF_EXIT_INSN(),
7655 },
7656 .result = ACCEPT,
7657 .prog_type = BPF_PROG_TYPE_XDP,
7658 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7659 },
7660 {
7661 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
7662 .insns = {
7663 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7664 offsetof(struct xdp_md, data_meta)),
7665 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7666 offsetof(struct xdp_md, data)),
7667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7668 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7669 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7670 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7671 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7672 BPF_MOV64_IMM(BPF_REG_0, 0),
7673 BPF_EXIT_INSN(),
7674 },
7675 .errstr = "R1 offset is outside of the packet",
7676 .result = REJECT,
7677 .prog_type = BPF_PROG_TYPE_XDP,
7678 },
7679 {
7680 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
7681 .insns = {
7682 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7683 offsetof(struct xdp_md, data_meta)),
7684 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7685 offsetof(struct xdp_md, data)),
7686 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7688 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7689 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7690 BPF_MOV64_IMM(BPF_REG_0, 0),
7691 BPF_EXIT_INSN(),
7692 },
7693 .errstr = "R1 offset is outside of the packet",
7694 .result = REJECT,
7695 .prog_type = BPF_PROG_TYPE_XDP,
7696 },
7697 {
7698 "XDP pkt read, pkt_meta' < pkt_data, good access",
7699 .insns = {
7700 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7701 offsetof(struct xdp_md, data_meta)),
7702 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7703 offsetof(struct xdp_md, data)),
7704 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7706 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7707 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7708 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7709 BPF_MOV64_IMM(BPF_REG_0, 0),
7710 BPF_EXIT_INSN(),
7711 },
7712 .result = ACCEPT,
7713 .prog_type = BPF_PROG_TYPE_XDP,
7714 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7715 },
7716 {
7717 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
7718 .insns = {
7719 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7720 offsetof(struct xdp_md, data_meta)),
7721 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7722 offsetof(struct xdp_md, data)),
7723 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7725 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7726 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7727 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7728 BPF_MOV64_IMM(BPF_REG_0, 0),
7729 BPF_EXIT_INSN(),
7730 },
7731 .errstr = "R1 offset is outside of the packet",
7732 .result = REJECT,
7733 .prog_type = BPF_PROG_TYPE_XDP,
7734 },
7735 {
7736 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
7737 .insns = {
7738 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7739 offsetof(struct xdp_md, data_meta)),
7740 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7741 offsetof(struct xdp_md, data)),
7742 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7744 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7745 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7746 BPF_MOV64_IMM(BPF_REG_0, 0),
7747 BPF_EXIT_INSN(),
7748 },
7749 .errstr = "R1 offset is outside of the packet",
7750 .result = REJECT,
7751 .prog_type = BPF_PROG_TYPE_XDP,
7752 },
7753 {
7754 "XDP pkt read, pkt_data < pkt_meta', good access",
7755 .insns = {
7756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7757 offsetof(struct xdp_md, data_meta)),
7758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7759 offsetof(struct xdp_md, data)),
7760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7762 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7763 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7764 BPF_MOV64_IMM(BPF_REG_0, 0),
7765 BPF_EXIT_INSN(),
7766 },
7767 .result = ACCEPT,
7768 .prog_type = BPF_PROG_TYPE_XDP,
7769 },
7770 {
7771 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
7772 .insns = {
7773 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7774 offsetof(struct xdp_md, data_meta)),
7775 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7776 offsetof(struct xdp_md, data)),
7777 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7779 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7780 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7781 BPF_MOV64_IMM(BPF_REG_0, 0),
7782 BPF_EXIT_INSN(),
7783 },
7784 .errstr = "R1 offset is outside of the packet",
7785 .result = REJECT,
7786 .prog_type = BPF_PROG_TYPE_XDP,
7787 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7788 },
7789 {
7790 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
7791 .insns = {
7792 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7793 offsetof(struct xdp_md, data_meta)),
7794 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7795 offsetof(struct xdp_md, data)),
7796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7798 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7799 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7800 BPF_MOV64_IMM(BPF_REG_0, 0),
7801 BPF_EXIT_INSN(),
7802 },
7803 .errstr = "R1 offset is outside of the packet",
7804 .result = REJECT,
7805 .prog_type = BPF_PROG_TYPE_XDP,
7806 },
7807 {
7808 "XDP pkt read, pkt_meta' >= pkt_data, good access",
7809 .insns = {
7810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7811 offsetof(struct xdp_md, data_meta)),
7812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7813 offsetof(struct xdp_md, data)),
7814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7816 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7817 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7818 BPF_MOV64_IMM(BPF_REG_0, 0),
7819 BPF_EXIT_INSN(),
7820 },
7821 .result = ACCEPT,
7822 .prog_type = BPF_PROG_TYPE_XDP,
7823 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7824 },
7825 {
7826 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
7827 .insns = {
7828 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7829 offsetof(struct xdp_md, data_meta)),
7830 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7831 offsetof(struct xdp_md, data)),
7832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7834 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7835 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7836 BPF_MOV64_IMM(BPF_REG_0, 0),
7837 BPF_EXIT_INSN(),
7838 },
7839 .errstr = "R1 offset is outside of the packet",
7840 .result = REJECT,
7841 .prog_type = BPF_PROG_TYPE_XDP,
7842 },
7843 {
7844 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
7845 .insns = {
7846 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7847 offsetof(struct xdp_md, data_meta)),
7848 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7849 offsetof(struct xdp_md, data)),
7850 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7852 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7853 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7854 BPF_MOV64_IMM(BPF_REG_0, 0),
7855 BPF_EXIT_INSN(),
7856 },
7857 .errstr = "R1 offset is outside of the packet",
7858 .result = REJECT,
7859 .prog_type = BPF_PROG_TYPE_XDP,
7860 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7861 },
7862 {
7863 "XDP pkt read, pkt_data >= pkt_meta', good access",
7864 .insns = {
7865 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7866 offsetof(struct xdp_md, data_meta)),
7867 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7868 offsetof(struct xdp_md, data)),
7869 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7871 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7872 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7873 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7874 BPF_MOV64_IMM(BPF_REG_0, 0),
7875 BPF_EXIT_INSN(),
7876 },
7877 .result = ACCEPT,
7878 .prog_type = BPF_PROG_TYPE_XDP,
7879 },
7880 {
7881 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
7882 .insns = {
7883 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7884 offsetof(struct xdp_md, data_meta)),
7885 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7886 offsetof(struct xdp_md, data)),
7887 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7889 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7890 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7891 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7892 BPF_MOV64_IMM(BPF_REG_0, 0),
7893 BPF_EXIT_INSN(),
7894 },
7895 .errstr = "R1 offset is outside of the packet",
7896 .result = REJECT,
7897 .prog_type = BPF_PROG_TYPE_XDP,
7898 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7899 },
7900 {
7901 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
7902 .insns = {
7903 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7904 offsetof(struct xdp_md, data_meta)),
7905 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7906 offsetof(struct xdp_md, data)),
7907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7908 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7909 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7910 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7911 BPF_MOV64_IMM(BPF_REG_0, 0),
7912 BPF_EXIT_INSN(),
7913 },
7914 .errstr = "R1 offset is outside of the packet",
7915 .result = REJECT,
7916 .prog_type = BPF_PROG_TYPE_XDP,
7917 },
7918 {
7919 "XDP pkt read, pkt_meta' <= pkt_data, good access",
7920 .insns = {
7921 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7922 offsetof(struct xdp_md, data_meta)),
7923 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7924 offsetof(struct xdp_md, data)),
7925 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7927 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7928 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7929 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7930 BPF_MOV64_IMM(BPF_REG_0, 0),
7931 BPF_EXIT_INSN(),
7932 },
7933 .result = ACCEPT,
7934 .prog_type = BPF_PROG_TYPE_XDP,
7935 },
7936 {
7937 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
7938 .insns = {
7939 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7940 offsetof(struct xdp_md, data_meta)),
7941 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7942 offsetof(struct xdp_md, data)),
7943 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7944 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7945 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7946 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7948 BPF_MOV64_IMM(BPF_REG_0, 0),
7949 BPF_EXIT_INSN(),
7950 },
7951 .errstr = "R1 offset is outside of the packet",
7952 .result = REJECT,
7953 .prog_type = BPF_PROG_TYPE_XDP,
7954 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7955 },
7956 {
7957 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
7958 .insns = {
7959 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7960 offsetof(struct xdp_md, data_meta)),
7961 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7962 offsetof(struct xdp_md, data)),
7963 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7965 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7966 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7967 BPF_MOV64_IMM(BPF_REG_0, 0),
7968 BPF_EXIT_INSN(),
7969 },
7970 .errstr = "R1 offset is outside of the packet",
7971 .result = REJECT,
7972 .prog_type = BPF_PROG_TYPE_XDP,
7973 },
7974 {
7975 "XDP pkt read, pkt_data <= pkt_meta', good access",
7976 .insns = {
7977 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7978 offsetof(struct xdp_md, data_meta)),
7979 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7980 offsetof(struct xdp_md, data)),
7981 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7983 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7984 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7985 BPF_MOV64_IMM(BPF_REG_0, 0),
7986 BPF_EXIT_INSN(),
7987 },
7988 .result = ACCEPT,
7989 .prog_type = BPF_PROG_TYPE_XDP,
7990 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7991 },
7992 {
7993 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
7994 .insns = {
7995 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7996 offsetof(struct xdp_md, data_meta)),
7997 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7998 offsetof(struct xdp_md, data)),
7999 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8000 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8001 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8002 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8003 BPF_MOV64_IMM(BPF_REG_0, 0),
8004 BPF_EXIT_INSN(),
8005 },
8006 .errstr = "R1 offset is outside of the packet",
8007 .result = REJECT,
8008 .prog_type = BPF_PROG_TYPE_XDP,
8009 },
8010 {
8011 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8012 .insns = {
8013 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8014 offsetof(struct xdp_md, data_meta)),
8015 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8016 offsetof(struct xdp_md, data)),
8017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8019 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8020 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8021 BPF_MOV64_IMM(BPF_REG_0, 0),
8022 BPF_EXIT_INSN(),
8023 },
8024 .errstr = "R1 offset is outside of the packet",
8025 .result = REJECT,
8026 .prog_type = BPF_PROG_TYPE_XDP,
8027 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8028 },
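/* The "bpf_exit with invalid return code" tests rely on
 * BPF_PROG_TYPE_CGROUP_SOCK accepting only a return value of 0 or 1.
 * The "(0x0; 0xffffffff)" style error strings are the verifier's
 * (value; unknown-bits mask) dump of R0: test1 fails because all low
 * 32 bits of R0 are unknown, test5 because R0 is known to be exactly 2.
 */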
8029 {
Daniel Borkmannb06723d2017-11-01 23:58:09 +01008030 "bpf_exit with invalid return code. test1",
8031 .insns = {
8032 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8033 BPF_EXIT_INSN(),
8034 },
8035 .errstr = "R0 has value (0x0; 0xffffffff)",
8036 .result = REJECT,
8037 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8038 },
8039 {
8040 "bpf_exit with invalid return code. test2",
8041 .insns = {
8042 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8043 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
8044 BPF_EXIT_INSN(),
8045 },
8046 .result = ACCEPT,
8047 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8048 },
8049 {
8050 "bpf_exit with invalid return code. test3",
8051 .insns = {
8052 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8053 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
8054 BPF_EXIT_INSN(),
8055 },
8056 .errstr = "R0 has value (0x0; 0x3)",
8057 .result = REJECT,
8058 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8059 },
8060 {
8061 "bpf_exit with invalid return code. test4",
8062 .insns = {
8063 BPF_MOV64_IMM(BPF_REG_0, 1),
8064 BPF_EXIT_INSN(),
8065 },
8066 .result = ACCEPT,
8067 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8068 },
8069 {
8070 "bpf_exit with invalid return code. test5",
8071 .insns = {
8072 BPF_MOV64_IMM(BPF_REG_0, 2),
8073 BPF_EXIT_INSN(),
8074 },
8075 .errstr = "R0 has value (0x2; 0x0)",
8076 .result = REJECT,
8077 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8078 },
8079 {
8080 "bpf_exit with invalid return code. test6",
8081 .insns = {
8082 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8083 BPF_EXIT_INSN(),
8084 },
8085 .errstr = "R0 is not a known value (ctx)",
8086 .result = REJECT,
8087 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8088 },
8089 {
8090 "bpf_exit with invalid return code. test7",
8091 .insns = {
8092 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8093 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
8094 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
8095 BPF_EXIT_INSN(),
8096 },
8097 .errstr = "R0 has unknown scalar value",
8098 .result = REJECT,
8099 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8100 },
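/* The "calls:" tests below use bpf-to-bpf calls encoded as
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, imm): src_reg == 1
 * (BPF_PSEUDO_CALL) marks a call to another BPF function rather than to
 * a helper, and imm is the offset of the callee's first instruction
 * relative to the instruction following the call.  In "calls: basic
 * sanity", for instance, the call at insn 0 with imm == 2 targets
 * insn 3, i.e. the BPF_MOV64_IMM(BPF_REG_0, 2) subprogram.
 */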
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008101 {
8102 "calls: basic sanity",
8103 .insns = {
8104 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8105 BPF_MOV64_IMM(BPF_REG_0, 1),
8106 BPF_EXIT_INSN(),
8107 BPF_MOV64_IMM(BPF_REG_0, 2),
8108 BPF_EXIT_INSN(),
8109 },
8110 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8111 .result = ACCEPT,
8112 },
8113 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008114 "calls: not on unprivileged",
8115 .insns = {
8116 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8117 BPF_MOV64_IMM(BPF_REG_0, 1),
8118 BPF_EXIT_INSN(),
8119 BPF_MOV64_IMM(BPF_REG_0, 2),
8120 BPF_EXIT_INSN(),
8121 },
8122 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
8123 .result_unpriv = REJECT,
8124 .result = ACCEPT,
8125 },
8126 {
8127 "calls: overlapping caller/callee",
8128 .insns = {
8129 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
8130 BPF_MOV64_IMM(BPF_REG_0, 1),
8131 BPF_EXIT_INSN(),
8132 },
8133 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8134 .errstr = "last insn is not an exit or jmp",
8135 .result = REJECT,
8136 },
8137 {
8138 "calls: wrong recursive calls",
8139 .insns = {
8140 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8141 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8143 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8144 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
8145 BPF_MOV64_IMM(BPF_REG_0, 1),
8146 BPF_EXIT_INSN(),
8147 },
8148 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8149 .errstr = "jump out of range",
8150 .result = REJECT,
8151 },
8152 {
8153 "calls: wrong src reg",
8154 .insns = {
8155 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
8156 BPF_MOV64_IMM(BPF_REG_0, 1),
8157 BPF_EXIT_INSN(),
8158 },
8159 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8160 .errstr = "BPF_CALL uses reserved fields",
8161 .result = REJECT,
8162 },
8163 {
8164 "calls: wrong off value",
8165 .insns = {
8166 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
8167 BPF_MOV64_IMM(BPF_REG_0, 1),
8168 BPF_EXIT_INSN(),
8169 BPF_MOV64_IMM(BPF_REG_0, 2),
8170 BPF_EXIT_INSN(),
8171 },
8172 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8173 .errstr = "BPF_CALL uses reserved fields",
8174 .result = REJECT,
8175 },
8176 {
8177 "calls: jump back loop",
8178 .insns = {
8179 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
8180 BPF_MOV64_IMM(BPF_REG_0, 1),
8181 BPF_EXIT_INSN(),
8182 },
8183 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8184 .errstr = "back-edge from insn 0 to 0",
8185 .result = REJECT,
8186 },
8187 {
8188 "calls: conditional call",
8189 .insns = {
8190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8191 offsetof(struct __sk_buff, mark)),
8192 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8193 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8194 BPF_MOV64_IMM(BPF_REG_0, 1),
8195 BPF_EXIT_INSN(),
8196 BPF_MOV64_IMM(BPF_REG_0, 2),
8197 BPF_EXIT_INSN(),
8198 },
8199 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8200 .errstr = "jump out of range",
8201 .result = REJECT,
8202 },
8203 {
8204 "calls: conditional call 2",
8205 .insns = {
8206 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8207 offsetof(struct __sk_buff, mark)),
8208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8209 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8210 BPF_MOV64_IMM(BPF_REG_0, 1),
8211 BPF_EXIT_INSN(),
8212 BPF_MOV64_IMM(BPF_REG_0, 2),
8213 BPF_EXIT_INSN(),
8214 BPF_MOV64_IMM(BPF_REG_0, 3),
8215 BPF_EXIT_INSN(),
8216 },
8217 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8218 .result = ACCEPT,
8219 },
8220 {
8221 "calls: conditional call 3",
8222 .insns = {
8223 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8224 offsetof(struct __sk_buff, mark)),
8225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8226 BPF_JMP_IMM(BPF_JA, 0, 0, 4),
8227 BPF_MOV64_IMM(BPF_REG_0, 1),
8228 BPF_EXIT_INSN(),
8229 BPF_MOV64_IMM(BPF_REG_0, 1),
8230 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8231 BPF_MOV64_IMM(BPF_REG_0, 3),
8232 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8233 },
8234 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8235 .errstr = "back-edge from insn",
8236 .result = REJECT,
8237 },
8238 {
8239 "calls: conditional call 4",
8240 .insns = {
8241 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8242 offsetof(struct __sk_buff, mark)),
8243 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8245 BPF_MOV64_IMM(BPF_REG_0, 1),
8246 BPF_EXIT_INSN(),
8247 BPF_MOV64_IMM(BPF_REG_0, 1),
8248 BPF_JMP_IMM(BPF_JA, 0, 0, -5),
8249 BPF_MOV64_IMM(BPF_REG_0, 3),
8250 BPF_EXIT_INSN(),
8251 },
8252 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8253 .result = ACCEPT,
8254 },
8255 {
8256 "calls: conditional call 5",
8257 .insns = {
8258 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8259 offsetof(struct __sk_buff, mark)),
8260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
8261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8262 BPF_MOV64_IMM(BPF_REG_0, 1),
8263 BPF_EXIT_INSN(),
8264 BPF_MOV64_IMM(BPF_REG_0, 1),
8265 BPF_JMP_IMM(BPF_JA, 0, 0, -6),
8266 BPF_MOV64_IMM(BPF_REG_0, 3),
8267 BPF_EXIT_INSN(),
8268 },
8269 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8270 .errstr = "back-edge from insn",
8271 .result = REJECT,
8272 },
8273 {
8274 "calls: conditional call 6",
8275 .insns = {
8276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
8278 BPF_EXIT_INSN(),
8279 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8280 offsetof(struct __sk_buff, mark)),
8281 BPF_EXIT_INSN(),
8282 },
8283 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8284 .errstr = "back-edge from insn",
8285 .result = REJECT,
8286 },
8287 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008288 "calls: using r0 returned by callee",
8289 .insns = {
8290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8291 BPF_EXIT_INSN(),
8292 BPF_MOV64_IMM(BPF_REG_0, 2),
8293 BPF_EXIT_INSN(),
8294 },
8295 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8296 .result = ACCEPT,
8297 },
8298 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008299 "calls: using uninit r0 from callee",
8300 .insns = {
8301 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8302 BPF_EXIT_INSN(),
8303 BPF_EXIT_INSN(),
8304 },
8305 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8306 .errstr = "!read_ok",
8307 .result = REJECT,
8308 },
8309 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008310 "calls: callee is using r1",
8311 .insns = {
8312 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8313 BPF_EXIT_INSN(),
8314 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8315 offsetof(struct __sk_buff, len)),
8316 BPF_EXIT_INSN(),
8317 },
8318 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
8319 .result = ACCEPT,
8320 },
8321 {
8322 "calls: callee using args1",
8323 .insns = {
8324 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8325 BPF_EXIT_INSN(),
8326 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8327 BPF_EXIT_INSN(),
8328 },
8329 .errstr_unpriv = "allowed for root only",
8330 .result_unpriv = REJECT,
8331 .result = ACCEPT,
8332 },
8333 {
8334 "calls: callee using wrong args2",
8335 .insns = {
8336 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8337 BPF_EXIT_INSN(),
8338 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8339 BPF_EXIT_INSN(),
8340 },
8341 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8342 .errstr = "R2 !read_ok",
8343 .result = REJECT,
8344 },
8345 {
8346 "calls: callee using two args",
8347 .insns = {
8348 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8349 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
8350 offsetof(struct __sk_buff, len)),
8351 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
8352 offsetof(struct __sk_buff, len)),
8353 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8354 BPF_EXIT_INSN(),
8355 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8356 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8357 BPF_EXIT_INSN(),
8358 },
8359 .errstr_unpriv = "allowed for root only",
8360 .result_unpriv = REJECT,
8361 .result = ACCEPT,
8362 },
8363 {
8364 "calls: callee changing pkt pointers",
8365 .insns = {
8366 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8367 offsetof(struct xdp_md, data)),
8368 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
8369 offsetof(struct xdp_md, data_end)),
8370 BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
8371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
8372 BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
8373 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8374 /* clear_all_pkt_pointers() has to walk all frames
8375 * to make sure that pkt pointers in the caller
8376 * are cleared when the callee calls a helper that
8377 * adjusts the packet size
8378 */
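/* Concretely: the callee below invokes bpf_xdp_adjust_head(), which can
 * move the packet, so the verifier has to turn r6 (a pkt pointer saved
 * in this caller frame before the call) into an unknown scalar, making
 * the following load fail with "R6 invalid mem access 'inv'".
 */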
8379 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
8380 BPF_MOV32_IMM(BPF_REG_0, 0),
8381 BPF_EXIT_INSN(),
8382 BPF_MOV64_IMM(BPF_REG_2, 0),
8383 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8384 BPF_FUNC_xdp_adjust_head),
8385 BPF_EXIT_INSN(),
8386 },
8387 .result = REJECT,
8388 .errstr = "R6 invalid mem access 'inv'",
8389 .prog_type = BPF_PROG_TYPE_XDP,
8390 },
8391 {
8392 "calls: two calls with args",
8393 .insns = {
8394 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8395 BPF_EXIT_INSN(),
8396 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
8398 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
8399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8401 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
8402 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8403 BPF_EXIT_INSN(),
8404 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8405 offsetof(struct __sk_buff, len)),
8406 BPF_EXIT_INSN(),
8407 },
8408 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8409 .result = ACCEPT,
8410 },
8411 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008412 "calls: calls with stack arith",
8413 .insns = {
8414 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8415 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
8416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8417 BPF_EXIT_INSN(),
8418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
8419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8420 BPF_EXIT_INSN(),
8421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
8422 BPF_MOV64_IMM(BPF_REG_0, 42),
8423 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8424 BPF_EXIT_INSN(),
8425 },
8426 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8427 .result = ACCEPT,
8428 },
8429 {
8430 "calls: calls with misaligned stack access",
8431 .insns = {
8432 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8433 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
8434 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8435 BPF_EXIT_INSN(),
8436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
8437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8438 BPF_EXIT_INSN(),
8439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
8440 BPF_MOV64_IMM(BPF_REG_0, 42),
8441 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
8442 BPF_EXIT_INSN(),
8443 },
8444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8445 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
8446 .errstr = "misaligned stack access",
8447 .result = REJECT,
8448 },
8449 {
8450 "calls: calls control flow, jump test",
8451 .insns = {
8452 BPF_MOV64_IMM(BPF_REG_0, 42),
8453 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
8454 BPF_MOV64_IMM(BPF_REG_0, 43),
8455 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8456 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
8457 BPF_EXIT_INSN(),
8458 },
8459 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8460 .result = ACCEPT,
8461 },
8462 {
8463 "calls: calls control flow, jump test 2",
8464 .insns = {
8465 BPF_MOV64_IMM(BPF_REG_0, 42),
8466 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
8467 BPF_MOV64_IMM(BPF_REG_0, 43),
8468 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8469 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
8470 BPF_EXIT_INSN(),
8471 },
8472 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8473 .errstr = "jump out of range from insn 1 to 4",
8474 .result = REJECT,
8475 },
8476 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008477 "calls: two calls with bad jump",
8478 .insns = {
8479 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8480 BPF_EXIT_INSN(),
8481 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
8483 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
8484 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8485 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8486 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
8487 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8488 BPF_EXIT_INSN(),
8489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8490 offsetof(struct __sk_buff, len)),
8491 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
8492 BPF_EXIT_INSN(),
8493 },
8494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8495 .errstr = "jump out of range from insn 11 to 9",
8496 .result = REJECT,
8497 },
8498 {
8499 "calls: recursive call. test1",
8500 .insns = {
8501 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8502 BPF_EXIT_INSN(),
8503 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
8504 BPF_EXIT_INSN(),
8505 },
8506 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8507 .errstr = "back-edge",
8508 .result = REJECT,
8509 },
8510 {
8511 "calls: recursive call. test2",
8512 .insns = {
8513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8514 BPF_EXIT_INSN(),
8515 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
8516 BPF_EXIT_INSN(),
8517 },
8518 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8519 .errstr = "back-edge",
8520 .result = REJECT,
8521 },
8522 {
8523 "calls: unreachable code",
8524 .insns = {
8525 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8526 BPF_EXIT_INSN(),
8527 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8528 BPF_EXIT_INSN(),
8529 BPF_MOV64_IMM(BPF_REG_0, 0),
8530 BPF_EXIT_INSN(),
8531 BPF_MOV64_IMM(BPF_REG_0, 0),
8532 BPF_EXIT_INSN(),
8533 },
8534 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8535 .errstr = "unreachable insn 6",
8536 .result = REJECT,
8537 },
8538 {
8539 "calls: invalid call",
8540 .insns = {
8541 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8542 BPF_EXIT_INSN(),
8543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
8544 BPF_EXIT_INSN(),
8545 },
8546 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8547 .errstr = "invalid destination",
8548 .result = REJECT,
8549 },
8550 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008551 "calls: invalid call 2",
8552 .insns = {
8553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8554 BPF_EXIT_INSN(),
8555 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
8556 BPF_EXIT_INSN(),
8557 },
8558 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8559 .errstr = "invalid destination",
8560 .result = REJECT,
8561 },
8562 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008563 "calls: jumping across function bodies. test1",
8564 .insns = {
8565 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8566 BPF_MOV64_IMM(BPF_REG_0, 0),
8567 BPF_EXIT_INSN(),
8568 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
8569 BPF_EXIT_INSN(),
8570 },
8571 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8572 .errstr = "jump out of range",
8573 .result = REJECT,
8574 },
8575 {
8576 "calls: jumping across function bodies. test2",
8577 .insns = {
8578 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
8579 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8580 BPF_MOV64_IMM(BPF_REG_0, 0),
8581 BPF_EXIT_INSN(),
8582 BPF_EXIT_INSN(),
8583 },
8584 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8585 .errstr = "jump out of range",
8586 .result = REJECT,
8587 },
8588 {
8589 "calls: call without exit",
8590 .insns = {
8591 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8592 BPF_EXIT_INSN(),
8593 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8594 BPF_EXIT_INSN(),
8595 BPF_MOV64_IMM(BPF_REG_0, 0),
8596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
8597 },
8598 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8599 .errstr = "not an exit",
8600 .result = REJECT,
8601 },
8602 {
8603 "calls: call into middle of ld_imm64",
8604 .insns = {
8605 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8606 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8607 BPF_MOV64_IMM(BPF_REG_0, 0),
8608 BPF_EXIT_INSN(),
8609 BPF_LD_IMM64(BPF_REG_0, 0),
8610 BPF_EXIT_INSN(),
8611 },
8612 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8613 .errstr = "last insn",
8614 .result = REJECT,
8615 },
8616 {
8617 "calls: call into middle of other call",
8618 .insns = {
8619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8621 BPF_MOV64_IMM(BPF_REG_0, 0),
8622 BPF_EXIT_INSN(),
8623 BPF_MOV64_IMM(BPF_REG_0, 0),
8624 BPF_MOV64_IMM(BPF_REG_0, 0),
8625 BPF_EXIT_INSN(),
8626 },
8627 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8628 .errstr = "last insn",
8629 .result = REJECT,
8630 },
8631 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008632 "calls: ld_abs with changing ctx data in callee",
8633 .insns = {
8634 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8635 BPF_LD_ABS(BPF_B, 0),
8636 BPF_LD_ABS(BPF_H, 0),
8637 BPF_LD_ABS(BPF_W, 0),
8638 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8639 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
8640 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8641 BPF_LD_ABS(BPF_B, 0),
8642 BPF_LD_ABS(BPF_H, 0),
8643 BPF_LD_ABS(BPF_W, 0),
8644 BPF_EXIT_INSN(),
8645 BPF_MOV64_IMM(BPF_REG_2, 1),
8646 BPF_MOV64_IMM(BPF_REG_3, 2),
8647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8648 BPF_FUNC_skb_vlan_push),
8649 BPF_EXIT_INSN(),
8650 },
8651 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
8652 .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
8653 .result = REJECT,
8654 },
8655 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008656 "calls: two calls with bad fallthrough",
8657 .insns = {
8658 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8659 BPF_EXIT_INSN(),
8660 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
8662 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
8663 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8665 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
8666 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8667 BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
8668 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8669 offsetof(struct __sk_buff, len)),
8670 BPF_EXIT_INSN(),
8671 },
8672 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
8673 .errstr = "not an exit",
8674 .result = REJECT,
8675 },
8676 {
8677 "calls: two calls with stack read",
8678 .insns = {
8679 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8680 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8683 BPF_EXIT_INSN(),
8684 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8685 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
8686 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
8687 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8688 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8689 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
8690 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8691 BPF_EXIT_INSN(),
8692 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8693 BPF_EXIT_INSN(),
8694 },
8695 .prog_type = BPF_PROG_TYPE_XDP,
8696 .result = ACCEPT,
8697 },
8698 {
8699 "calls: two calls with stack write",
8700 .insns = {
8701 /* main prog */
8702 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
8707 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8708 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
8709 BPF_EXIT_INSN(),
8710
8711 /* subprog 1 */
8712 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8713 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
8714 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
8715 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
8716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8717 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
8718 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
8719 BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
8720 /* write into stack frame of main prog */
8721 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
8722 BPF_EXIT_INSN(),
8723
8724 /* subprog 2 */
8725 /* read from stack frame of main prog */
8726 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8727 BPF_EXIT_INSN(),
8728 },
8729 .prog_type = BPF_PROG_TYPE_XDP,
8730 .result = ACCEPT,
8731 },
8732 {
Jann Horn6b80ad22017-12-22 19:12:35 +01008733 "calls: stack overflow using two frames (pre-call access)",
8734 .insns = {
8735 /* prog 1 */
8736 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8737 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
8738 BPF_EXIT_INSN(),
8739
8740 /* prog 2 */
8741 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8742 BPF_MOV64_IMM(BPF_REG_0, 0),
8743 BPF_EXIT_INSN(),
8744 },
8745 .prog_type = BPF_PROG_TYPE_XDP,
8746 .errstr = "combined stack size",
8747 .result = REJECT,
8748 },
8749 {
8750 "calls: stack overflow using two frames (post-call access)",
8751 .insns = {
8752 /* prog 1 */
8753 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
8754 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8755 BPF_EXIT_INSN(),
8756
8757 /* prog 2 */
8758 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8759 BPF_MOV64_IMM(BPF_REG_0, 0),
8760 BPF_EXIT_INSN(),
8761 },
8762 .prog_type = BPF_PROG_TYPE_XDP,
8763 .errstr = "combined stack size",
8764 .result = REJECT,
8765 },
8766 {
Alexei Starovoitov6b86c422017-12-25 13:15:41 -08008767 "calls: stack depth check using three frames. test1",
8768 .insns = {
8769 /* main */
8770 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
8771 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
8772 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
8773 BPF_MOV64_IMM(BPF_REG_0, 0),
8774 BPF_EXIT_INSN(),
8775 /* A */
8776 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
8777 BPF_EXIT_INSN(),
8778 /* B */
8779 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
8780 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
8781 BPF_EXIT_INSN(),
8782 },
8783 .prog_type = BPF_PROG_TYPE_XDP,
8784 /* stack_main=32, stack_A=256, stack_B=64
8785 * and max(main+A, main+A+B) < 512
8786 */
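/* i.e. max(32 + 256, 32 + 64 + 256) = 352 bytes, within the 512-byte
 * combined stack limit these tests assume.
 */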
8787 .result = ACCEPT,
8788 },
8789 {
8790 "calls: stack depth check using three frames. test2",
8791 .insns = {
8792 /* main */
8793 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
8794 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
8795 BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
8796 BPF_MOV64_IMM(BPF_REG_0, 0),
8797 BPF_EXIT_INSN(),
8798 /* A */
8799 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
8800 BPF_EXIT_INSN(),
8801 /* B */
8802 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
8803 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
8804 BPF_EXIT_INSN(),
8805 },
8806 .prog_type = BPF_PROG_TYPE_XDP,
8807 /* stack_main=32, stack_A=64, stack_B=256
8808 * and max(main+A, main+A+B) < 512
8809 */
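/* i.e. max(32 + 64, 32 + 64 + 256) = 352 bytes, again within the
 * assumed 512-byte limit.
 */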
8810 .result = ACCEPT,
8811 },
8812 {
8813 "calls: stack depth check using three frames. test3",
8814 .insns = {
8815 /* main */
8816 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8817 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
8818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
8819 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
8820 BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
8821 BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
8822 BPF_MOV64_IMM(BPF_REG_0, 0),
8823 BPF_EXIT_INSN(),
8824 /* A */
8825 BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
8826 BPF_EXIT_INSN(),
8827 BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
8828 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
8829 /* B */
8830 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
8831 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
8832 BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
8833 BPF_EXIT_INSN(),
8834 },
8835 .prog_type = BPF_PROG_TYPE_XDP,
8836 /* stack_main=64, stack_A=224, stack_B=256
8837 * and max(main+A, main+A+B) > 512
8838 */
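/* i.e. max(64 + 224, 64 + 224 + 256) = 544 bytes, which exceeds the
 * assumed 512-byte limit and triggers the "combined stack" error.
 */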
8839 .errstr = "combined stack",
8840 .result = REJECT,
8841 },
8842 {
8843 "calls: stack depth check using three frames. test4",
8844 /* void main(void) {
8845 * func1(0);
8846 * func1(1);
8847 * func2(1);
8848 * }
8849 * void func1(int alloc_or_recurse) {
8850 * if (alloc_or_recurse) {
8851 * frame_pointer[-300] = 1;
8852 * } else {
8853 * func2(alloc_or_recurse);
8854 * }
8855 * }
8856 * void func2(int alloc_or_recurse) {
8857 * if (alloc_or_recurse) {
8858 * frame_pointer[-300] = 1;
8859 * }
8860 * }
8861 */
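/* The depth check is static: func1 and func2 are each charged their
 * worst-case 300 bytes because of the frame_pointer[-300] stores, so
 * the chain main -> func1 -> func2 counts as roughly 600 bytes even
 * though no runtime path allocates both at once, hence the rejection.
 */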
8862 .insns = {
8863 /* main */
8864 BPF_MOV64_IMM(BPF_REG_1, 0),
8865 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
8866 BPF_MOV64_IMM(BPF_REG_1, 1),
8867 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
8868 BPF_MOV64_IMM(BPF_REG_1, 1),
8869 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
8870 BPF_MOV64_IMM(BPF_REG_0, 0),
8871 BPF_EXIT_INSN(),
8872 /* A */
8873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
8874 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8875 BPF_EXIT_INSN(),
8876 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
8877 BPF_EXIT_INSN(),
8878 /* B */
8879 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
8880 BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
8881 BPF_EXIT_INSN(),
8882 },
8883 .prog_type = BPF_PROG_TYPE_XDP,
8884 .result = REJECT,
8885 .errstr = "combined stack",
8886 },
8887 {
Alexei Starovoitovaada9ce2017-12-25 13:15:42 -08008888 "calls: stack depth check using three frames. test5",
8889 .insns = {
8890 /* main */
8891 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
8892 BPF_EXIT_INSN(),
8893 /* A */
8894 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
8895 BPF_EXIT_INSN(),
8896 /* B */
8897 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
8898 BPF_EXIT_INSN(),
8899 /* C */
8900 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
8901 BPF_EXIT_INSN(),
8902 /* D */
8903 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
8904 BPF_EXIT_INSN(),
8905 /* E */
8906 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
8907 BPF_EXIT_INSN(),
8908 /* F */
8909 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
8910 BPF_EXIT_INSN(),
8911 /* G */
8912 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
8913 BPF_EXIT_INSN(),
8914 /* H */
8915 BPF_MOV64_IMM(BPF_REG_0, 0),
8916 BPF_EXIT_INSN(),
8917 },
8918 .prog_type = BPF_PROG_TYPE_XDP,
8919 .errstr = "call stack",
8920 .result = REJECT,
8921 },
8922 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008923 "calls: spill into caller stack frame",
8924 .insns = {
8925 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8927 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8929 BPF_EXIT_INSN(),
8930 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
8931 BPF_MOV64_IMM(BPF_REG_0, 0),
8932 BPF_EXIT_INSN(),
8933 },
8934 .prog_type = BPF_PROG_TYPE_XDP,
8935 .errstr = "cannot spill",
8936 .result = REJECT,
8937 },
8938 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08008939 "calls: write into caller stack frame",
8940 .insns = {
8941 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8943 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8944 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8945 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
8946 BPF_EXIT_INSN(),
8947 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
8948 BPF_MOV64_IMM(BPF_REG_0, 0),
8949 BPF_EXIT_INSN(),
8950 },
8951 .prog_type = BPF_PROG_TYPE_XDP,
8952 .result = ACCEPT,
8953 },
8954 {
8955 "calls: write into callee stack frame",
8956 .insns = {
8957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8958 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
8959 BPF_EXIT_INSN(),
8960 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8961 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
8962 BPF_EXIT_INSN(),
8963 },
8964 .prog_type = BPF_PROG_TYPE_XDP,
8965 .errstr = "cannot return stack pointer",
8966 .result = REJECT,
8967 },
8968 {
Alexei Starovoitova7ff3ec2017-12-14 17:55:07 -08008969 "calls: two calls with stack write and void return",
8970 .insns = {
8971 /* main prog */
8972 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8973 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
8975 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
8977 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
8978 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
8979 BPF_EXIT_INSN(),
8980
8981 /* subprog 1 */
8982 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8983 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
8984 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
8985 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8986 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
8987 BPF_EXIT_INSN(),
8988
8989 /* subprog 2 */
8990 /* write into stack frame of main prog */
8991 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
8992 BPF_EXIT_INSN(), /* void return */
8993 },
8994 .prog_type = BPF_PROG_TYPE_XDP,
8995 .result = ACCEPT,
8996 },
8997 {
8998 "calls: ambiguous return value",
8999 .insns = {
9000 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9001 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
9002 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9003 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9005 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9006 BPF_EXIT_INSN(),
9007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
9008 BPF_MOV64_IMM(BPF_REG_0, 0),
9009 BPF_EXIT_INSN(),
9010 },
9011 .errstr_unpriv = "allowed for root only",
9012 .result_unpriv = REJECT,
9013 .errstr = "R0 !read_ok",
9014 .result = REJECT,
9015 },
9016 {
9017 "calls: two calls that return map_value",
9018 .insns = {
9019 /* main prog */
9020 /* pass fp-16, fp-8 into a function */
9021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9023 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9024 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9025 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
9026
9027 /* fetch map_value_ptr from the stack of this function */
9028 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9029 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
9030 /* write into map value */
9031 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9032 /* fetch second map_value_ptr from the stack */
9033 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
9034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
9035 /* write into map value */
9036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9037 BPF_MOV64_IMM(BPF_REG_0, 0),
9038 BPF_EXIT_INSN(),
9039
9040 /* subprog 1 */
9041 /* call 3rd function twice */
9042 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9043 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9044 /* first time with fp-8 */
9045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9046 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9047 /* second time with fp-16 */
9048 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9049 BPF_EXIT_INSN(),
9050
9051 /* subprog 2 */
9052 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9053 /* lookup from map */
9054 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9055 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9057 BPF_LD_MAP_FD(BPF_REG_1, 0),
9058 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9059 BPF_FUNC_map_lookup_elem),
9060 /* write map_value_ptr into stack frame of main prog */
9061 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9062 BPF_MOV64_IMM(BPF_REG_0, 0),
9063 BPF_EXIT_INSN(), /* return 0 */
9064 },
9065 .prog_type = BPF_PROG_TYPE_XDP,
9066 .fixup_map1 = { 23 },
9067 .result = ACCEPT,
9068 },
9069 {
9070 "calls: two calls that return map_value with bool condition",
9071 .insns = {
9072 /* main prog */
9073 /* pass fp-16, fp-8 into a function */
9074 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9076 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9078 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9079 BPF_MOV64_IMM(BPF_REG_0, 0),
9080 BPF_EXIT_INSN(),
9081
9082 /* subprog 1 */
9083 /* call 3rd function twice */
9084 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9085 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9086 /* first time with fp-8 */
9087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
9088 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9089 /* fetch map_value_ptr from the stack of this function */
9090 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9091 /* write into map value */
9092 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9093 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9094 /* second time with fp-16 */
9095 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9096 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9097 /* fetch second map_value_ptr from the stack */
9098 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
9099 /* write into map value */
9100 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9101 BPF_EXIT_INSN(),
9102
9103 /* subprog 2 */
9104 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9105 /* lookup from map */
9106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9109 BPF_LD_MAP_FD(BPF_REG_1, 0),
9110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9111 BPF_FUNC_map_lookup_elem),
9112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9113 BPF_MOV64_IMM(BPF_REG_0, 0),
9114 BPF_EXIT_INSN(), /* return 0 */
9115 /* write map_value_ptr into stack frame of main prog */
9116 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9117 BPF_MOV64_IMM(BPF_REG_0, 1),
9118 BPF_EXIT_INSN(), /* return 1 */
9119 },
9120 .prog_type = BPF_PROG_TYPE_XDP,
9121 .fixup_map1 = { 23 },
9122 .result = ACCEPT,
9123 },
9124 {
9125 "calls: two calls that return map_value with incorrect bool check",
9126 .insns = {
9127 /* main prog */
9128 /* pass fp-16, fp-8 into a function */
9129 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9133 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9134 BPF_MOV64_IMM(BPF_REG_0, 0),
9135 BPF_EXIT_INSN(),
9136
9137 /* subprog 1 */
9138 /* call 3rd function twice */
9139 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9140 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9141 /* first time with fp-8 */
9142 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
9143 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
9144 /* fetch map_value_ptr from the stack of this function */
9145 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
9146 /* write into map value */
9147 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9148 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
9149 /* second time with fp-16 */
9150 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9151 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9152 /* fetch second map_value_ptr from the stack */
9153 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
9154 /* write into map value */
9155 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9156 BPF_EXIT_INSN(),
9157
9158 /* subprog 2 */
9159 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9160 /* lookup from map */
9161 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9162 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9163 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9164 BPF_LD_MAP_FD(BPF_REG_1, 0),
9165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9166 BPF_FUNC_map_lookup_elem),
9167 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9168 BPF_MOV64_IMM(BPF_REG_0, 0),
9169 BPF_EXIT_INSN(), /* return 0 */
9170 /* write map_value_ptr into stack frame of main prog */
9171 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9172 BPF_MOV64_IMM(BPF_REG_0, 1),
9173 BPF_EXIT_INSN(), /* return 1 */
9174 },
9175 .prog_type = BPF_PROG_TYPE_XDP,
9176 .fixup_map1 = { 23 },
9177 .result = REJECT,
9178 .errstr = "invalid read from stack off -16+0 size 8",
9179 },
9180 {
9181 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
9182 .insns = {
9183 /* main prog */
9184 /* pass fp-16, fp-8 into a function */
9185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9187 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9190 BPF_MOV64_IMM(BPF_REG_0, 0),
9191 BPF_EXIT_INSN(),
9192
9193 /* subprog 1 */
9194 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9195 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9196 /* 1st lookup from map */
9197 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9198 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9200 BPF_LD_MAP_FD(BPF_REG_1, 0),
9201 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9202 BPF_FUNC_map_lookup_elem),
9203 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9204 BPF_MOV64_IMM(BPF_REG_8, 0),
9205 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9206 /* write map_value_ptr into stack frame of main prog at fp-8 */
9207 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9208 BPF_MOV64_IMM(BPF_REG_8, 1),
9209
9210 /* 2nd lookup from map */
9211 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
9212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9213 BPF_LD_MAP_FD(BPF_REG_1, 0),
9214 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
9215 BPF_FUNC_map_lookup_elem),
9216 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9217 BPF_MOV64_IMM(BPF_REG_9, 0),
9218 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9219 /* write map_value_ptr into stack frame of main prog at fp-16 */
9220 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9221 BPF_MOV64_IMM(BPF_REG_9, 1),
9222
9223 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9225 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9226 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9227 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9228 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
9229 BPF_EXIT_INSN(),
9230
9231 /* subprog 2 */
9232 /* if arg2 == 1 do *arg1 = 0 */
9233 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9234 /* fetch map_value_ptr from the stack of this function */
9235 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9236 /* write into map value */
9237 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9238
9239 /* if arg4 == 1 do *arg3 = 0 */
9240 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9241 /* fetch map_value_ptr from the stack of this function */
9242 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9243 /* write into map value */
9244 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
9245 BPF_EXIT_INSN(),
9246 },
9247 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9248 .fixup_map1 = { 12, 22 },
9249 .result = REJECT,
9250 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
9251 },
9252 {
9253 "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
9254 .insns = {
9255 /* main prog */
9256 /* pass fp-16, fp-8 into a function */
9257 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9258 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9259 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9262 BPF_MOV64_IMM(BPF_REG_0, 0),
9263 BPF_EXIT_INSN(),
9264
9265 /* subprog 1 */
9266 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9267 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9268 /* 1st lookup from map */
9269 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9270 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9272 BPF_LD_MAP_FD(BPF_REG_1, 0),
9273 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9274 BPF_FUNC_map_lookup_elem),
9275 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9276 BPF_MOV64_IMM(BPF_REG_8, 0),
9277 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9278 /* write map_value_ptr into stack frame of main prog at fp-8 */
9279 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9280 BPF_MOV64_IMM(BPF_REG_8, 1),
9281
9282 /* 2nd lookup from map */
9283 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
9284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9285 BPF_LD_MAP_FD(BPF_REG_1, 0),
9286 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
9287 BPF_FUNC_map_lookup_elem),
9288 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9289 BPF_MOV64_IMM(BPF_REG_9, 0),
9290 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9291 /* write map_value_ptr into stack frame of main prog at fp-16 */
9292 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9293 BPF_MOV64_IMM(BPF_REG_9, 1),
9294
9295 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9296 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9297 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9298 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9299 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
9301 BPF_EXIT_INSN(),
9302
9303 /* subprog 2 */
9304 /* if arg2 == 1 do *arg1 = 0 */
9305 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9306 /* fetch map_value_ptr from the stack of this function */
9307 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9308 /* write into map value */
9309 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9310
9311 /* if arg4 == 1 do *arg3 = 0 */
9312 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9313 /* fetch map_value_ptr from the stack of this function */
9314 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9315 /* write into map value */
9316 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9317 BPF_EXIT_INSN(),
9318 },
9319 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9320 .fixup_map1 = { 12, 22 },
9321 .result = ACCEPT,
9322 },
9323 {
9324 "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
9325 .insns = {
9326 /* main prog */
9327 /* pass fp-16, fp-8 into a function */
9328 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9330 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9332 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
9333 BPF_MOV64_IMM(BPF_REG_0, 0),
9334 BPF_EXIT_INSN(),
9335
9336 /* subprog 1 */
9337 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9338 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9339 /* 1st lookup from map */
9340 BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
9341 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
9343 BPF_LD_MAP_FD(BPF_REG_1, 0),
9344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9345 BPF_FUNC_map_lookup_elem),
9346 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9347 BPF_MOV64_IMM(BPF_REG_8, 0),
9348 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9349 /* write map_value_ptr into stack frame of main prog at fp-8 */
9350 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9351 BPF_MOV64_IMM(BPF_REG_8, 1),
9352
9353 /* 2nd lookup from map */
9354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
9356 BPF_LD_MAP_FD(BPF_REG_1, 0),
9357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9358 BPF_FUNC_map_lookup_elem),
9359 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9360			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
9361 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
9362 /* write map_value_ptr into stack frame of main prog at fp-16 */
9363 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9364 BPF_MOV64_IMM(BPF_REG_9, 1),
9365
9366 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9367			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
9368 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9369 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9370 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9371			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
9372 BPF_JMP_IMM(BPF_JA, 0, 0, -30),
9373
9374 /* subprog 2 */
9375 /* if arg2 == 1 do *arg1 = 0 */
9376 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9377 /* fetch map_value_ptr from the stack of this function */
9378 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9379 /* write into map value */
9380 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9381
9382 /* if arg4 == 1 do *arg3 = 0 */
9383 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9384 /* fetch map_value_ptr from the stack of this function */
9385 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9386 /* write into map value */
9387 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
9388 BPF_JMP_IMM(BPF_JA, 0, 0, -8),
9389 },
9390 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9391 .fixup_map1 = { 12, 22 },
9392 .result = REJECT,
9393 .errstr = "invalid access to map value, value_size=8 off=2 size=8",
9394 },
9395 {
9396 "calls: two calls that receive map_value_ptr_or_null via arg. test1",
9397 .insns = {
9398 /* main prog */
9399 /* pass fp-16, fp-8 into a function */
9400 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9404 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9405 BPF_MOV64_IMM(BPF_REG_0, 0),
9406 BPF_EXIT_INSN(),
9407
9408 /* subprog 1 */
9409 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9410 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9411 /* 1st lookup from map */
9412 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9413 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9415 BPF_LD_MAP_FD(BPF_REG_1, 0),
9416 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9417 BPF_FUNC_map_lookup_elem),
9418 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
9419 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9420 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9421 BPF_MOV64_IMM(BPF_REG_8, 0),
9422 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9423 BPF_MOV64_IMM(BPF_REG_8, 1),
9424
9425 /* 2nd lookup from map */
9426 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9428 BPF_LD_MAP_FD(BPF_REG_1, 0),
9429 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9430 BPF_FUNC_map_lookup_elem),
9431 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
9432 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9433 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9434 BPF_MOV64_IMM(BPF_REG_9, 0),
9435 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9436 BPF_MOV64_IMM(BPF_REG_9, 1),
9437
9438 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9440 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9441 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9442 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9444 BPF_EXIT_INSN(),
9445
9446 /* subprog 2 */
9447 /* if arg2 == 1 do *arg1 = 0 */
9448 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9449 /* fetch map_value_ptr from the stack of this function */
9450 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9451 /* write into map value */
9452 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9453
9454 /* if arg4 == 1 do *arg3 = 0 */
9455 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
9456 /* fetch map_value_ptr from the stack of this function */
9457 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9458 /* write into map value */
9459 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9460 BPF_EXIT_INSN(),
9461 },
9462 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9463 .fixup_map1 = { 12, 22 },
9464 .result = ACCEPT,
9465 },
9466 {
9467 "calls: two calls that receive map_value_ptr_or_null via arg. test2",
9468 .insns = {
9469 /* main prog */
9470 /* pass fp-16, fp-8 into a function */
9471 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
9473 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
9475 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
9476 BPF_MOV64_IMM(BPF_REG_0, 0),
9477 BPF_EXIT_INSN(),
9478
9479 /* subprog 1 */
9480 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
9481 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
9482 /* 1st lookup from map */
9483 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9484 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9486 BPF_LD_MAP_FD(BPF_REG_1, 0),
9487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9488 BPF_FUNC_map_lookup_elem),
9489 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
9490 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9491 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9492 BPF_MOV64_IMM(BPF_REG_8, 0),
9493 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9494 BPF_MOV64_IMM(BPF_REG_8, 1),
9495
9496 /* 2nd lookup from map */
9497 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9498 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9499 BPF_LD_MAP_FD(BPF_REG_1, 0),
9500 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9501 BPF_FUNC_map_lookup_elem),
9502 /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
9503 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
9504 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9505 BPF_MOV64_IMM(BPF_REG_9, 0),
9506 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9507 BPF_MOV64_IMM(BPF_REG_9, 1),
9508
9509 /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
9510 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
9511 BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
9512 BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
9513 BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
9514 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9515 BPF_EXIT_INSN(),
9516
9517 /* subprog 2 */
9518 /* if arg2 == 1 do *arg1 = 0 */
9519 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
9520 /* fetch map_value_ptr from the stack of this function */
9521 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
9522 /* write into map value */
9523 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9524
9525 /* if arg4 == 0 do *arg3 = 0 */
9526 BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
9527 /* fetch map_value_ptr from the stack of this function */
9528 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
9529 /* write into map value */
9530 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
9531 BPF_EXIT_INSN(),
9532 },
9533 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9534 .fixup_map1 = { 12, 22 },
9535 .result = REJECT,
9536 .errstr = "R0 invalid mem access 'inv'",
9537 },
9538 {
9539 "calls: pkt_ptr spill into caller stack",
9540 .insns = {
9541 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
9544 BPF_EXIT_INSN(),
9545
9546 /* subprog 1 */
9547 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9548 offsetof(struct __sk_buff, data)),
9549 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9550 offsetof(struct __sk_buff, data_end)),
9551 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9553 /* spill unchecked pkt_ptr into stack of caller */
9554 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9555 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
9556 /* now the pkt range is verified, read pkt_ptr from stack */
9557 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
9558 /* write 4 bytes into packet */
9559 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9560 BPF_EXIT_INSN(),
9561 },
9562 .result = ACCEPT,
9563 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9564 },
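	/* The remaining pkt_ptr spill tests exercise a callee spilling a
	 * packet pointer into the caller's stack slot and check whether the
	 * verifier keeps or invalidates the spilled pointer's range marking.
	 */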
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08009565 {
Daniel Borkmann28ab1732017-12-14 17:55:17 -08009566 "calls: pkt_ptr spill into caller stack 2",
9567 .insns = {
9568 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9570 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9571			/* Marking is still kept, but not safe in all cases. */
9572 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9573 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
9574 BPF_EXIT_INSN(),
9575
9576 /* subprog 1 */
9577 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9578 offsetof(struct __sk_buff, data)),
9579 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9580 offsetof(struct __sk_buff, data_end)),
9581 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9583 /* spill unchecked pkt_ptr into stack of caller */
9584 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9585 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
9586 /* now the pkt range is verified, read pkt_ptr from stack */
9587 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
9588 /* write 4 bytes into packet */
9589 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9590 BPF_EXIT_INSN(),
9591 },
9592 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9593 .errstr = "invalid access to packet",
9594 .result = REJECT,
9595 },
9596 {
9597 "calls: pkt_ptr spill into caller stack 3",
9598 .insns = {
9599 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9601 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9603 /* Marking is still kept and safe here. */
9604 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9605 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
9606 BPF_EXIT_INSN(),
9607
9608 /* subprog 1 */
9609 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9610 offsetof(struct __sk_buff, data)),
9611 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9612 offsetof(struct __sk_buff, data_end)),
9613 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9615 /* spill unchecked pkt_ptr into stack of caller */
9616 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9617 BPF_MOV64_IMM(BPF_REG_5, 0),
9618 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
9619 BPF_MOV64_IMM(BPF_REG_5, 1),
9620 /* now the pkt range is verified, read pkt_ptr from stack */
9621 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
9622 /* write 4 bytes into packet */
9623 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9624 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9625 BPF_EXIT_INSN(),
9626 },
9627 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9628 .result = ACCEPT,
9629 },
9630 {
9631 "calls: pkt_ptr spill into caller stack 4",
9632 .insns = {
9633 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9635 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9636 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9637 /* Check marking propagated. */
9638 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9639 BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
9640 BPF_EXIT_INSN(),
9641
9642 /* subprog 1 */
9643 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9644 offsetof(struct __sk_buff, data)),
9645 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9646 offsetof(struct __sk_buff, data_end)),
9647 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9649 /* spill unchecked pkt_ptr into stack of caller */
9650 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9651 BPF_MOV64_IMM(BPF_REG_5, 0),
9652 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
9653 BPF_MOV64_IMM(BPF_REG_5, 1),
9654 /* don't read back pkt_ptr from stack here */
9655 /* write 4 bytes into packet */
9656 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9657 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9658 BPF_EXIT_INSN(),
9659 },
9660 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9661 .result = ACCEPT,
9662 },
9663 {
9664 "calls: pkt_ptr spill into caller stack 5",
9665 .insns = {
9666 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9668 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
9669 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9670 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9671 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
9672 BPF_EXIT_INSN(),
9673
9674 /* subprog 1 */
9675 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9676 offsetof(struct __sk_buff, data)),
9677 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9678 offsetof(struct __sk_buff, data_end)),
9679 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9681 BPF_MOV64_IMM(BPF_REG_5, 0),
9682 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
9683 /* spill checked pkt_ptr into stack of caller */
9684 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9685 BPF_MOV64_IMM(BPF_REG_5, 1),
9686 /* don't read back pkt_ptr from stack here */
9687 /* write 4 bytes into packet */
9688 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9689 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9690 BPF_EXIT_INSN(),
9691 },
9692 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9693 .errstr = "same insn cannot be used with different",
9694 .result = REJECT,
9695 },
9696 {
9697 "calls: pkt_ptr spill into caller stack 6",
9698 .insns = {
9699 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9700 offsetof(struct __sk_buff, data_end)),
9701 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9703 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9704 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9705 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9706 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
9707 BPF_EXIT_INSN(),
9708
9709 /* subprog 1 */
9710 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9711 offsetof(struct __sk_buff, data)),
9712 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9713 offsetof(struct __sk_buff, data_end)),
9714 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9715 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9716 BPF_MOV64_IMM(BPF_REG_5, 0),
9717 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
9718 /* spill checked pkt_ptr into stack of caller */
9719 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9720 BPF_MOV64_IMM(BPF_REG_5, 1),
9721 /* don't read back pkt_ptr from stack here */
9722 /* write 4 bytes into packet */
9723 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9724 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9725 BPF_EXIT_INSN(),
9726 },
9727 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9728 .errstr = "R4 invalid mem access",
9729 .result = REJECT,
9730 },
9731 {
9732 "calls: pkt_ptr spill into caller stack 7",
9733 .insns = {
9734 BPF_MOV64_IMM(BPF_REG_2, 0),
9735 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9737 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9738 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9739 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
9741 BPF_EXIT_INSN(),
9742
9743 /* subprog 1 */
9744 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9745 offsetof(struct __sk_buff, data)),
9746 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9747 offsetof(struct __sk_buff, data_end)),
9748 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9750 BPF_MOV64_IMM(BPF_REG_5, 0),
9751 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
9752 /* spill checked pkt_ptr into stack of caller */
9753 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9754 BPF_MOV64_IMM(BPF_REG_5, 1),
9755 /* don't read back pkt_ptr from stack here */
9756 /* write 4 bytes into packet */
9757 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9758 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9759 BPF_EXIT_INSN(),
9760 },
9761 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9762 .errstr = "R4 invalid mem access",
9763 .result = REJECT,
9764 },
9765 {
9766 "calls: pkt_ptr spill into caller stack 8",
9767 .insns = {
9768 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9769 offsetof(struct __sk_buff, data)),
9770 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9771 offsetof(struct __sk_buff, data_end)),
9772 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9773 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9774 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
9775 BPF_EXIT_INSN(),
9776 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9777 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9778 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9779 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9780 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9781 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
9782 BPF_EXIT_INSN(),
9783
9784 /* subprog 1 */
9785 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9786 offsetof(struct __sk_buff, data)),
9787 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9788 offsetof(struct __sk_buff, data_end)),
9789 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9791 BPF_MOV64_IMM(BPF_REG_5, 0),
9792 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
9793 /* spill checked pkt_ptr into stack of caller */
9794 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9795 BPF_MOV64_IMM(BPF_REG_5, 1),
9796 /* don't read back pkt_ptr from stack here */
9797 /* write 4 bytes into packet */
9798 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9799 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9800 BPF_EXIT_INSN(),
9801 },
9802 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9803 .result = ACCEPT,
9804 },
9805 {
9806 "calls: pkt_ptr spill into caller stack 9",
9807 .insns = {
9808 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9809 offsetof(struct __sk_buff, data)),
9810 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9811 offsetof(struct __sk_buff, data_end)),
9812 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9814 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
9815 BPF_EXIT_INSN(),
9816 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
9817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
9818 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9819 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
9820 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
9821 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
9822 BPF_EXIT_INSN(),
9823
9824 /* subprog 1 */
9825 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9826 offsetof(struct __sk_buff, data)),
9827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9828 offsetof(struct __sk_buff, data_end)),
9829 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9831 BPF_MOV64_IMM(BPF_REG_5, 0),
9832 /* spill unchecked pkt_ptr into stack of caller */
9833 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
9834 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
9835 BPF_MOV64_IMM(BPF_REG_5, 1),
9836 /* don't read back pkt_ptr from stack here */
9837 /* write 4 bytes into packet */
9838 BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
9839 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
9840 BPF_EXIT_INSN(),
9841 },
9842 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9843 .errstr = "invalid access to packet",
9844 .result = REJECT,
9845 },
9846 {
Alexei Starovoitovd98588c2017-12-14 17:55:09 -08009847 "calls: caller stack init to zero or map_value_or_null",
9848 .insns = {
9849 BPF_MOV64_IMM(BPF_REG_0, 0),
9850 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
9851 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9853 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
9854 /* fetch map_value_or_null or const_zero from stack */
9855 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9856 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
9857 /* store into map_value */
9858 BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
9859 BPF_EXIT_INSN(),
9860
9861 /* subprog 1 */
9862 /* if (ctx == 0) return; */
9863 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
9864 /* else bpf_map_lookup() and *(fp - 8) = r0 */
9865 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9868 BPF_LD_MAP_FD(BPF_REG_1, 0),
9869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9870 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9871 BPF_FUNC_map_lookup_elem),
9872 /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
9873 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
9874 BPF_EXIT_INSN(),
9875 },
9876 .fixup_map1 = { 13 },
9877 .result = ACCEPT,
9878 .prog_type = BPF_PROG_TYPE_XDP,
9879 },
9880 {
9881 "calls: stack init to zero and pruning",
9882 .insns = {
9883			/* first make allocated_stack 16 bytes */
9884 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
9885 /* now fork the execution such that the false branch
9886			 * of JGT insn will be verified second and it skips zero
9887			 * init of the fp-8 stack slot. If stack liveness marking
9888			 * is missing live_read marks from call map_lookup
9889			 * processing, then pruning will incorrectly assume
9890			 * that the fp-8 stack slot was unused in the fall-through
9891 * branch and will accept the program incorrectly
9892 */
9893 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
9894 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9895 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9898 BPF_LD_MAP_FD(BPF_REG_1, 0),
9899 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9900 BPF_FUNC_map_lookup_elem),
9901 BPF_EXIT_INSN(),
9902 },
9903 .fixup_map2 = { 6 },
9904 .errstr = "invalid indirect read from stack off -8+0 size 8",
9905 .result = REJECT,
9906 .prog_type = BPF_PROG_TYPE_XDP,
9907 },
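	/* The search pruning tests below make sure the verifier still walks
	 * every branch and does not prune paths that would hide an invalid
	 * access.
	 */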
Gianluca Borellofd05e572017-12-23 10:09:55 +00009908 {
9909 "search pruning: all branches should be verified (nop operation)",
9910 .insns = {
9911 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9913 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
9914 BPF_LD_MAP_FD(BPF_REG_1, 0),
9915 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9916 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
9917 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
9918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
9919 BPF_MOV64_IMM(BPF_REG_4, 0),
9920 BPF_JMP_A(1),
9921 BPF_MOV64_IMM(BPF_REG_4, 1),
9922 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
9923 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
9924 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
9925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
9926 BPF_MOV64_IMM(BPF_REG_6, 0),
9927 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
9928 BPF_EXIT_INSN(),
9929 },
9930 .fixup_map1 = { 3 },
9931 .errstr = "R6 invalid mem access 'inv'",
9932 .result = REJECT,
9933 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9934 },
9935 {
9936 "search pruning: all branches should be verified (invalid stack access)",
9937 .insns = {
9938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9940 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
9941 BPF_LD_MAP_FD(BPF_REG_1, 0),
9942 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9944 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
9945 BPF_MOV64_IMM(BPF_REG_4, 0),
9946 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
9947 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
9948 BPF_JMP_A(1),
9949 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
9950 BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
9951 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
9952 BPF_EXIT_INSN(),
9953 },
9954 .fixup_map1 = { 3 },
9955 .errstr = "invalid read from stack off -16+0 size 8",
9956 .result = REJECT,
9957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9958 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009959};
9960
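/*
 * Scan backwards from MAX_INSNS for the last non-zero instruction to figure
 * out how many instructions a test case actually uses.
 */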
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009961static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009962{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009963 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009964
9965 for (len = MAX_INSNS - 1; len > 0; --len)
9966 if (fp[len].code != 0 || fp[len].imm != 0)
9967 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009968 return len + 1;
9969}
9970
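/*
 * Create a BPF_F_NO_PREALLOC hash map with 8-byte keys and the given value
 * size; its fd gets patched into the test program via the fixup arrays.
 */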
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009971static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009972{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009973 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009974
Mickaël Salaünf4874d02017-02-10 00:21:43 +01009975 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009976 size_value, max_elem, BPF_F_NO_PREALLOC);
9977 if (fd < 0)
9978 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07009979
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009980 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07009981}
9982
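/* Create a 4-slot prog array whose fd is patched in via fixup_prog. */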
9983static int create_prog_array(void)
9984{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009985 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07009986
Mickaël Salaünf4874d02017-02-10 00:21:43 +01009987 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009988 sizeof(int), 4, 0);
9989 if (fd < 0)
9990 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009991
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02009992 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07009993}
9994
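/*
 * Create a single-entry array-of-maps whose inner map is a one-element int
 * array; only the outer map fd is returned, the inner fd is closed again.
 */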
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07009995static int create_map_in_map(void)
9996{
9997 int inner_map_fd, outer_map_fd;
9998
9999 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
10000 sizeof(int), 1, 0);
10001 if (inner_map_fd < 0) {
10002 printf("Failed to create array '%s'!\n", strerror(errno));
10003 return inner_map_fd;
10004 }
10005
Martin KaFai Lau88cda1c2017-09-27 14:37:54 -070010006 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010007 sizeof(int), inner_map_fd, 1, 0);
10008 if (outer_map_fd < 0)
10009 printf("Failed to create array of maps '%s'!\n",
10010 strerror(errno));
10011
10012 close(inner_map_fd);
10013
10014 return outer_map_fd;
10015}
10016
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010017static char bpf_vlog[32768];
10018
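/*
 * Patch the map fd placeholders (BPF_LD_MAP_FD with imm 0) at the
 * instruction indices listed in the test's fixup arrays with freshly
 * created map fds.
 */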
10019static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010020 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010021{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010022 int *fixup_map1 = test->fixup_map1;
10023 int *fixup_map2 = test->fixup_map2;
10024 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010025 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010026
10027	/* Allocating HTs with 1 elem is fine here, since we only test
10028	 * the verifier and do not do a runtime lookup, so the only thing
10029	 * that really matters is the value size in this case.
10030 */
10031 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010032 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010033 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010034 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010035 fixup_map1++;
10036 } while (*fixup_map1);
10037 }
10038
10039 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010040 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010041 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010042 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010043 fixup_map2++;
10044 } while (*fixup_map2);
10045 }
10046
10047 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010048 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010049 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010050 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010051 fixup_prog++;
10052 } while (*fixup_prog);
10053 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010054
10055 if (*fixup_map_in_map) {
10056 map_fds[3] = create_map_in_map();
10057 do {
10058 prog[*fixup_map_in_map].imm = map_fds[3];
10059 fixup_map_in_map++;
10060 } while (*fixup_map_in_map);
10061 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010062}
10063
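/*
 * Load a single test program, compare the verifier verdict and log against
 * the expected result and error string (with separate expectations for the
 * unprivileged run), and clean up the program and map fds afterwards.
 */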
10064static void do_test_single(struct bpf_test *test, bool unpriv,
10065 int *passes, int *errors)
10066{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010067 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010068 struct bpf_insn *prog = test->insns;
10069 int prog_len = probe_filter_length(prog);
10070 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010071 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010072 const char *expected_err;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010073 int i;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010074
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010075 for (i = 0; i < MAX_NR_MAPS; i++)
10076 map_fds[i] = -1;
10077
10078 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010079
Daniel Borkmann614d0d72017-05-25 01:05:09 +020010080 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
10081 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +020010082 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010083
10084 expected_ret = unpriv && test->result_unpriv != UNDEF ?
10085 test->result_unpriv : test->result;
10086 expected_err = unpriv && test->errstr_unpriv ?
10087 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010088
10089 reject_from_alignment = fd_prog < 0 &&
10090 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
10091 strstr(bpf_vlog, "Unknown alignment.");
10092#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
10093 if (reject_from_alignment) {
10094 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
10095 strerror(errno));
10096 goto fail_log;
10097 }
10098#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010099 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010100 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010101 printf("FAIL\nFailed to load prog '%s'!\n",
10102 strerror(errno));
10103 goto fail_log;
10104 }
10105 } else {
10106 if (fd_prog >= 0) {
10107 printf("FAIL\nUnexpected success to load!\n");
10108 goto fail_log;
10109 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010110 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010111 printf("FAIL\nUnexpected error message!\n");
10112 goto fail_log;
10113 }
10114 }
10115
10116 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +020010117 printf("OK%s\n", reject_from_alignment ?
10118 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010119close_fds:
10120 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -070010121 for (i = 0; i < MAX_NR_MAPS; i++)
10122 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010123 sched_yield();
10124 return;
10125fail_log:
10126 (*errors)++;
10127 printf("%s", bpf_vlog);
10128 goto close_fds;
10129}
10130
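/* Report whether CAP_SYS_ADMIN is in the effective capability set. */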
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010131static bool is_admin(void)
10132{
10133 cap_t caps;
10134 cap_flag_value_t sysadmin = CAP_CLEAR;
10135 const cap_value_t cap_val = CAP_SYS_ADMIN;
10136
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080010137#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010138 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
10139 perror("cap_get_flag");
10140 return false;
10141 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -080010142#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010143 caps = cap_get_proc();
10144 if (!caps) {
10145 perror("cap_get_proc");
10146 return false;
10147 }
10148 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
10149 perror("cap_get_flag");
10150 if (cap_free(caps))
10151 perror("cap_free");
10152 return (sysadmin == CAP_SET);
10153}
10154
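/*
 * Raise or drop CAP_SYS_ADMIN in the effective set so the same process can
 * run tests both as privileged and as unprivileged.
 */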
10155static int set_admin(bool admin)
10156{
10157 cap_t caps;
10158 const cap_value_t cap_val = CAP_SYS_ADMIN;
10159 int ret = -1;
10160
10161 caps = cap_get_proc();
10162 if (!caps) {
10163 perror("cap_get_proc");
10164 return -1;
10165 }
10166 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
10167 admin ? CAP_SET : CAP_CLEAR)) {
10168 perror("cap_set_flag");
10169 goto out;
10170 }
10171 if (cap_set_proc(caps)) {
10172 perror("cap_set_proc");
10173 goto out;
10174 }
10175 ret = 0;
10176out:
10177 if (cap_free(caps))
10178 perror("cap_free");
10179 return ret;
10180}
10181
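/*
 * Run tests[from..to): tests without an explicit prog_type additionally get
 * an unprivileged pass (with CAP_SYS_ADMIN dropped), all others are only
 * loaded with privileges.
 */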
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010182static int do_test(bool unpriv, unsigned int from, unsigned int to)
10183{
10184 int i, passes = 0, errors = 0;
10185
10186 for (i = from; i < to; i++) {
10187 struct bpf_test *test = &tests[i];
10188
10189		/* Program types that are not supported by non-root are
10190		 * skipped right away.
10191 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010192 if (!test->prog_type) {
10193 if (!unpriv)
10194 set_admin(false);
10195 printf("#%d/u %s ", i, test->descr);
10196 do_test_single(test, true, &passes, &errors);
10197 if (!unpriv)
10198 set_admin(true);
10199 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010200
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010201 if (!unpriv) {
10202 printf("#%d/p %s ", i, test->descr);
10203 do_test_single(test, false, &passes, &errors);
10204 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010205 }
10206
10207 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +020010208 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010209}
10210
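/*
 * With two arguments an inclusive range of tests is run, with a single
 * argument only that test, otherwise every test in the array.
 */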
10211int main(int argc, char **argv)
10212{
10213 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
10214 struct rlimit rlim = { 1 << 20, 1 << 20 };
10215 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +010010216 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010217
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010218 if (argc == 3) {
10219 unsigned int l = atoi(argv[argc - 2]);
10220 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010221
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010222 if (l < to && u < to) {
10223 from = l;
10224 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010225 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010226 } else if (argc == 2) {
10227 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010228
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010229 if (t < to) {
10230 from = t;
10231 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -070010232 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010233 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010234
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +020010235 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
10236 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -070010237}