/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

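/*
 * Pull in CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS from the kernel's autoconf.h
 * when the build provides it (HAVE_GENHDR); otherwise assume efficient
 * unaligned access on the architectures listed below so the corresponding
 * tests still run.
 */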
#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS	512
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

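/*
 * Each entry below is a self-contained verifier test: a program plus the
 * expected verdict.  The fixup_* arrays name instruction indices whose map
 * fd immediate the test runner is expected to patch with a freshly created
 * map of the matching kind before loading.  errstr/errstr_unpriv are
 * substrings that should show up in the verifier log on rejection, and
 * result/result_unpriv give the expected outcome for privileged and
 * unprivileged callers.  The flags above tweak how a case is run:
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS limits it to architectures with
 * efficient unaligned access, while F_LOAD_WITH_STRICT_ALIGNMENT is meant
 * to load the program with strict alignment checking enabled.
 */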
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

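/*
 * The test table.  Cases are grouped roughly by theme: basic ALU and control
 * flow, BPF_LD_IMM64 encodings, helper calls, stack spill/fill, map access,
 * and __sk_buff context accesses for the various program types.
 */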
static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
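	/*
	 * BPF_LD_IMM64 occupies two instruction slots; the second slot holds
	 * the upper 32 bits of the immediate and must leave its opcode,
	 * registers and offset zeroed.  The ld_imm64 tests below probe that
	 * encoding with truncated, reserved-field and mis-typed second halves.
	 */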
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test10 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test11 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test12 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"test13 ld_imm64",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"no bpf_exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"loop (back-edge)",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"loop2 (back-edge)",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"conditional loop",
		.insns = {
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
			BPF_EXIT_INSN(),
		},
		.errstr = "back-edge",
		.result = REJECT,
	},
	{
		"read uninitialized register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R2 !read_ok",
		.result = REJECT,
	},
	{
		"read invalid register",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.result = REJECT,
	},
	{
		"program doesn't init R0 before exit in all branches",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 !read_ok",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"stack out of bounds",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid stack",
		.result = REJECT,
	},
	{
		"invalid call insn1",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid call insn2",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_CALL uses reserved",
		.result = REJECT,
	},
	{
		"invalid function call",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid func unknown#1234567",
		.result = REJECT,
	},
	{
		"uninitialized stack1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 2 },
		.errstr = "invalid indirect read from stack",
		.result = REJECT,
	},
	{
		"uninitialized stack2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid read from stack",
		.result = REJECT,
	},
	{
		"invalid fp arithmetic",
		/* If this gets ever changed, make sure JITs can deal with it. */
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 subtraction from stack pointer",
		.result_unpriv = REJECT,
		.errstr = "R1 invalid mem access",
		.result = REJECT,
	},
	{
		"non-invalid fp arithmetic",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"invalid argument register",
		.insns = {
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 !read_ok",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
	{
		"non-invalid argument register",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_cgroup_classid),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
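	/*
	 * Spill/fill tests: the verifier tracks what is spilled to the stack,
	 * so a spilled context pointer keeps its type when filled back,
	 * partial overwrites of a spilled pointer are rejected, and
	 * unprivileged programs must not leak spilled pointers.
	 */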
	{
		"check valid spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* fill it back into R2 */
			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
			/* should be able to access R0 = *(R2 + 8) */
			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R0 leaks addr",
		.result = ACCEPT,
		.result_unpriv = REJECT,
	},
	{
		"check valid spill/fill, skb mark",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.result_unpriv = ACCEPT,
	},
	{
		"check corrupted spill/fill",
		.insns = {
			/* spill R1(ctx) into stack */
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			/* mess up with R1 pointer on stack */
			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
			/* fill back into R0 should fail */
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "attempt to corrupt spilled",
		.errstr = "corrupted spill",
		.result = REJECT,
	},
	{
		"invalid src register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R15 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
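	/*
	 * __sk_buff context access: which fields may be read or written, and
	 * at what access size, depends on the program type, so the same
	 * access can be accepted for one prog_type and rejected as an
	 * invalid bpf_context access for another.
	 */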
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"valid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->mark is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
	{
		"check skb->tc_index is not writeable by sockets",
		.insns = {
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
				    offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "R1 leaks addr",
		.result = REJECT,
	},
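	/*
	 * skb->cb[] is a 20-byte scratch area that the programs here may read
	 * and write at byte, half, word and double-word size, provided the
	 * access stays within cb[] and, under strict alignment, is naturally
	 * aligned.
	 */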
	{
		"check cb access: byte",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"__sk_buff->hash, offset 0, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"__sk_buff->tc_index, offset 3, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 3),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash byte load not permitted 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 3),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: byte, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: half",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: half, unaligned",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check __sk_buff->hash, offset 0, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->tc_index, offset 2, half store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, tc_index) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash half load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash half load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash) + 2),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
				    offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
1615 {
1616 "check out of range skb->cb access",
1617 .insns = {
1618 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001619 offsetof(struct __sk_buff, cb[0]) + 256),
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001620 BPF_EXIT_INSN(),
1621 },
1622 .errstr = "invalid bpf_context access",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001623 .errstr_unpriv = "",
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001624 .result = REJECT,
1625 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1626 },
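	/*
	 * __sk_buff field writes: the socket filter variant below only
	 * stores into cb[], and the store that is rejected for
	 * unprivileged users fails because it would leak the context
	 * pointer itself ("R1 leaks addr"); the tc_cls_act variant also
	 * writes mark and tc_index, which that program type may do.
	 */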
1627 {
1628 "write skb fields from socket prog",
1629 .insns = {
1630 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1631 offsetof(struct __sk_buff, cb[4])),
1632 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1633 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1634 offsetof(struct __sk_buff, mark)),
1635 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1636 offsetof(struct __sk_buff, tc_index)),
1637 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1638 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1639 offsetof(struct __sk_buff, cb[0])),
1640 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1641 offsetof(struct __sk_buff, cb[2])),
1642 BPF_EXIT_INSN(),
1643 },
1644 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001645 .errstr_unpriv = "R1 leaks addr",
1646 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001647 },
1648 {
1649 "write skb fields from tc_cls_act prog",
1650 .insns = {
1651 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1652 offsetof(struct __sk_buff, cb[0])),
1653 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1654 offsetof(struct __sk_buff, mark)),
1655 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1656 offsetof(struct __sk_buff, tc_index)),
1657 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1658 offsetof(struct __sk_buff, tc_index)),
1659 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1660 offsetof(struct __sk_buff, cb[3])),
1661 BPF_EXIT_INSN(),
1662 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001663 .errstr_unpriv = "",
1664 .result_unpriv = REJECT,
Alexei Starovoitovd691f9e2015-06-04 10:11:54 -07001665 .result = ACCEPT,
1666 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1667 },
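	/*
	 * PTR_TO_STACK load/store: a copy of the frame pointer may be
	 * offset and dereferenced as long as the final access stays
	 * inside the stack frame (512 bytes below r10) and, when
	 * F_LOAD_WITH_STRICT_ALIGNMENT is set, is naturally aligned.
	 * The shape being exercised, roughly:
	 *
	 *	r1 = r10;
	 *	r1 += -10;
	 *	*(u64 *)(r1 + 2) = 0xfaceb00c;	// lands at fp-8
	 *	r0 = *(u64 *)(r1 + 2);
	 */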
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001668 {
1669 "PTR_TO_STACK store/load",
1670 .insns = {
1671 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1673 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1674 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1675 BPF_EXIT_INSN(),
1676 },
1677 .result = ACCEPT,
1678 },
1679 {
1680 "PTR_TO_STACK store/load - bad alignment on off",
1681 .insns = {
1682 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1684 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1686 BPF_EXIT_INSN(),
1687 },
1688 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001689 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1690 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001691 },
1692 {
1693 "PTR_TO_STACK store/load - bad alignment on reg",
1694 .insns = {
1695 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1697 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1698 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1699 BPF_EXIT_INSN(),
1700 },
1701 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001702 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1703 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
Alex Gartrell24b4d2a2015-07-23 14:24:40 -07001704 },
1705 {
1706 "PTR_TO_STACK store/load - out of bounds low",
1707 .insns = {
1708 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1709 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1710 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1711 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1712 BPF_EXIT_INSN(),
1713 },
1714 .result = REJECT,
1715 .errstr = "invalid stack off=-79992 size=8",
1716 },
1717 {
1718 "PTR_TO_STACK store/load - out of bounds high",
1719 .insns = {
1720 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1722 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1723 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1724 BPF_EXIT_INSN(),
1725 },
1726 .result = REJECT,
1727 .errstr = "invalid stack off=0 size=8",
1728 },
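	/*
	 * "unpriv:" tests are exercised both with and without
	 * privileges; result/errstr describe the privileged run and
	 * result_unpriv/errstr_unpriv the unprivileged one. The common
	 * theme is that unprivileged programs must not leak kernel
	 * pointers (returning them, storing them where user space could
	 * read them back, or passing them to helpers), and that most
	 * pointer arithmetic and pointer comparisons are refused.
	 */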
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001729 {
1730 "unpriv: return pointer",
1731 .insns = {
1732 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1733 BPF_EXIT_INSN(),
1734 },
1735 .result = ACCEPT,
1736 .result_unpriv = REJECT,
1737 .errstr_unpriv = "R0 leaks addr",
1738 },
1739 {
1740 "unpriv: add const to pointer",
1741 .insns = {
1742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1743 BPF_MOV64_IMM(BPF_REG_0, 0),
1744 BPF_EXIT_INSN(),
1745 },
1746 .result = ACCEPT,
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001747 },
1748 {
1749 "unpriv: add pointer to pointer",
1750 .insns = {
1751 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1752 BPF_MOV64_IMM(BPF_REG_0, 0),
1753 BPF_EXIT_INSN(),
1754 },
1755 .result = ACCEPT,
1756 .result_unpriv = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01001757 .errstr_unpriv = "R1 pointer += pointer",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001758 },
1759 {
1760 "unpriv: neg pointer",
1761 .insns = {
1762 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1763 BPF_MOV64_IMM(BPF_REG_0, 0),
1764 BPF_EXIT_INSN(),
1765 },
1766 .result = ACCEPT,
1767 .result_unpriv = REJECT,
1768 .errstr_unpriv = "R1 pointer arithmetic",
1769 },
1770 {
1771 "unpriv: cmp pointer with const",
1772 .insns = {
1773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1774 BPF_MOV64_IMM(BPF_REG_0, 0),
1775 BPF_EXIT_INSN(),
1776 },
1777 .result = ACCEPT,
1778 .result_unpriv = REJECT,
1779 .errstr_unpriv = "R1 pointer comparison",
1780 },
1781 {
1782 "unpriv: cmp pointer with pointer",
1783 .insns = {
1784 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1785 BPF_MOV64_IMM(BPF_REG_0, 0),
1786 BPF_EXIT_INSN(),
1787 },
1788 .result = ACCEPT,
1789 .result_unpriv = REJECT,
1790 .errstr_unpriv = "R10 pointer comparison",
1791 },
1792 {
1793 "unpriv: check that printk is disallowed",
1794 .insns = {
1795 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1798 BPF_MOV64_IMM(BPF_REG_2, 8),
1799 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001800 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1801 BPF_FUNC_trace_printk),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001802 BPF_MOV64_IMM(BPF_REG_0, 0),
1803 BPF_EXIT_INSN(),
1804 },
Daniel Borkmann0eb69842016-12-15 01:39:10 +01001805 .errstr_unpriv = "unknown func bpf_trace_printk#6",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001806 .result_unpriv = REJECT,
1807 .result = ACCEPT,
1808 },
1809 {
1810 "unpriv: pass pointer to helper function",
1811 .insns = {
1812 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1813 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1815 BPF_LD_MAP_FD(BPF_REG_1, 0),
1816 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1817 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001818 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1819 BPF_FUNC_map_update_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001820 BPF_MOV64_IMM(BPF_REG_0, 0),
1821 BPF_EXIT_INSN(),
1822 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001823 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001824 .errstr_unpriv = "R4 leaks addr",
1825 .result_unpriv = REJECT,
1826 .result = ACCEPT,
1827 },
1828 {
1829 "unpriv: indirectly pass pointer on stack to helper function",
1830 .insns = {
1831 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1832 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1833 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1834 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001835 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1836 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001837 BPF_MOV64_IMM(BPF_REG_0, 0),
1838 BPF_EXIT_INSN(),
1839 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001840 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001841 .errstr = "invalid indirect read from stack off -8+0 size 8",
1842 .result = REJECT,
1843 },
1844 {
1845 "unpriv: mangle pointer on stack 1",
1846 .insns = {
1847 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1848 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1849 BPF_MOV64_IMM(BPF_REG_0, 0),
1850 BPF_EXIT_INSN(),
1851 },
1852 .errstr_unpriv = "attempt to corrupt spilled",
1853 .result_unpriv = REJECT,
1854 .result = ACCEPT,
1855 },
1856 {
1857 "unpriv: mangle pointer on stack 2",
1858 .insns = {
1859 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1860 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1861 BPF_MOV64_IMM(BPF_REG_0, 0),
1862 BPF_EXIT_INSN(),
1863 },
1864 .errstr_unpriv = "attempt to corrupt spilled",
1865 .result_unpriv = REJECT,
1866 .result = ACCEPT,
1867 },
1868 {
1869 "unpriv: read pointer from stack in small chunks",
1870 .insns = {
1871 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1873 BPF_MOV64_IMM(BPF_REG_0, 0),
1874 BPF_EXIT_INSN(),
1875 },
1876 .errstr = "invalid size",
1877 .result = REJECT,
1878 },
1879 {
1880 "unpriv: write pointer into ctx",
1881 .insns = {
1882 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1883 BPF_MOV64_IMM(BPF_REG_0, 0),
1884 BPF_EXIT_INSN(),
1885 },
1886 .errstr_unpriv = "R1 leaks addr",
1887 .result_unpriv = REJECT,
1888 .errstr = "invalid bpf_context access",
1889 .result = REJECT,
1890 },
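	/*
	 * Spill/fill of the ctx pointer: storing r1 to the stack and
	 * loading it back as a full 64-bit value keeps its type, so the
	 * refill can still be used as a context argument. Overwriting
	 * the slot with another pointer, or hitting it with an xadd,
	 * degrades it, and the refilled register then shows up as
	 * type=fp or type=inv instead of ctx and is rejected.
	 */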
1891 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001892 "unpriv: spill/fill of ctx",
1893 .insns = {
1894 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1896 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1897 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1898 BPF_MOV64_IMM(BPF_REG_0, 0),
1899 BPF_EXIT_INSN(),
1900 },
1901 .result = ACCEPT,
1902 },
1903 {
1904 "unpriv: spill/fill of ctx 2",
1905 .insns = {
1906 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1908 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1909 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1911 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001912 BPF_EXIT_INSN(),
1913 },
1914 .result = ACCEPT,
1915 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1916 },
1917 {
1918 "unpriv: spill/fill of ctx 3",
1919 .insns = {
1920 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1922 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1923 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1924 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1926 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001927 BPF_EXIT_INSN(),
1928 },
1929 .result = REJECT,
1930 .errstr = "R1 type=fp expected=ctx",
1931 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1932 },
1933 {
1934 "unpriv: spill/fill of ctx 4",
1935 .insns = {
1936 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1937 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1938 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1939 BPF_MOV64_IMM(BPF_REG_0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001940 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1941 BPF_REG_0, -8, 0),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001942 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02001943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1944 BPF_FUNC_get_hash_recalc),
Daniel Borkmann1a776b92016-10-17 14:28:35 +02001945 BPF_EXIT_INSN(),
1946 },
1947 .result = REJECT,
1948 .errstr = "R1 type=inv expected=ctx",
1949 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1950 },
1951 {
1952 "unpriv: spill/fill of different pointers stx",
1953 .insns = {
1954 BPF_MOV64_IMM(BPF_REG_3, 42),
1955 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1956 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1957 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1958 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1960 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1961 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1962 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1963 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1964 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1965 offsetof(struct __sk_buff, mark)),
1966 BPF_MOV64_IMM(BPF_REG_0, 0),
1967 BPF_EXIT_INSN(),
1968 },
1969 .result = REJECT,
1970 .errstr = "same insn cannot be used with different pointers",
1971 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1972 },
1973 {
1974 "unpriv: spill/fill of different pointers ldx",
1975 .insns = {
1976 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1979 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1981 -(__s32)offsetof(struct bpf_perf_event_data,
1982 sample_period) - 8),
1983 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1984 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1985 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1986 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1987 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1988 offsetof(struct bpf_perf_event_data,
1989 sample_period)),
1990 BPF_MOV64_IMM(BPF_REG_0, 0),
1991 BPF_EXIT_INSN(),
1992 },
1993 .result = REJECT,
1994 .errstr = "same insn cannot be used with different pointers",
1995 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1996 },
1997 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07001998 "unpriv: write pointer into map elem value",
1999 .insns = {
2000 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2001 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2003 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2005 BPF_FUNC_map_lookup_elem),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002006 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2007 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2008 BPF_EXIT_INSN(),
2009 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002010 .fixup_map1 = { 3 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002011 .errstr_unpriv = "R0 leaks addr",
2012 .result_unpriv = REJECT,
2013 .result = ACCEPT,
2014 },
2015 {
2016 "unpriv: partial copy of pointer",
2017 .insns = {
2018 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2019 BPF_MOV64_IMM(BPF_REG_0, 0),
2020 BPF_EXIT_INSN(),
2021 },
2022 .errstr_unpriv = "R10 partial copy",
2023 .result_unpriv = REJECT,
2024 .result = ACCEPT,
2025 },
2026 {
2027 "unpriv: pass pointer to tail_call",
2028 .insns = {
2029 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2030 BPF_LD_MAP_FD(BPF_REG_2, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2032 BPF_FUNC_tail_call),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002033 BPF_MOV64_IMM(BPF_REG_0, 0),
2034 BPF_EXIT_INSN(),
2035 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002036 .fixup_prog = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002037 .errstr_unpriv = "R3 leaks addr into helper",
2038 .result_unpriv = REJECT,
2039 .result = ACCEPT,
2040 },
2041 {
2042 "unpriv: cmp map pointer with zero",
2043 .insns = {
2044 BPF_MOV64_IMM(BPF_REG_1, 0),
2045 BPF_LD_MAP_FD(BPF_REG_1, 0),
2046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2047 BPF_MOV64_IMM(BPF_REG_0, 0),
2048 BPF_EXIT_INSN(),
2049 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002050 .fixup_map1 = { 1 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002051 .errstr_unpriv = "R1 pointer comparison",
2052 .result_unpriv = REJECT,
2053 .result = ACCEPT,
2054 },
2055 {
2056 "unpriv: write into frame pointer",
2057 .insns = {
2058 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2059 BPF_MOV64_IMM(BPF_REG_0, 0),
2060 BPF_EXIT_INSN(),
2061 },
2062 .errstr = "frame pointer is read only",
2063 .result = REJECT,
2064 },
2065 {
Daniel Borkmann1a776b92016-10-17 14:28:35 +02002066 "unpriv: spill/fill frame pointer",
2067 .insns = {
2068 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2070 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2071 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2072 BPF_MOV64_IMM(BPF_REG_0, 0),
2073 BPF_EXIT_INSN(),
2074 },
2075 .errstr = "frame pointer is read only",
2076 .result = REJECT,
2077 },
2078 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002079 "unpriv: cmp of frame pointer",
2080 .insns = {
2081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2082 BPF_MOV64_IMM(BPF_REG_0, 0),
2083 BPF_EXIT_INSN(),
2084 },
2085 .errstr_unpriv = "R10 pointer comparison",
2086 .result_unpriv = REJECT,
2087 .result = ACCEPT,
2088 },
2089 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002090 "unpriv: adding of fp",
2091 .insns = {
2092 BPF_MOV64_IMM(BPF_REG_0, 0),
2093 BPF_MOV64_IMM(BPF_REG_1, 0),
2094 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2095 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2096 BPF_EXIT_INSN(),
2097 },
Edward Creef65b1842017-08-07 15:27:12 +01002098 .result = ACCEPT,
Daniel Borkmann728a8532017-04-27 01:39:32 +02002099 },
2100 {
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002101 "unpriv: cmp of stack pointer",
2102 .insns = {
2103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2105 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2106 BPF_MOV64_IMM(BPF_REG_0, 0),
2107 BPF_EXIT_INSN(),
2108 },
2109 .errstr_unpriv = "R2 pointer comparison",
2110 .result_unpriv = REJECT,
2111 .result = ACCEPT,
2112 },
2113 {
Yonghong Song332270f2017-04-29 22:52:42 -07002114 "stack pointer arithmetic",
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002115 .insns = {
Yonghong Song332270f2017-04-29 22:52:42 -07002116 BPF_MOV64_IMM(BPF_REG_1, 4),
2117 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2118 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2121 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2122 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2123 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2124 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2126 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002127 BPF_MOV64_IMM(BPF_REG_0, 0),
2128 BPF_EXIT_INSN(),
2129 },
Alexei Starovoitovbf508872015-10-07 22:23:23 -07002130 .result = ACCEPT,
2131 },
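	/*
	 * raw_stack tests: bpf_skb_load_bytes(skb, off, to, len) may
	 * write into uninitialized stack memory, but the verifier still
	 * needs a stack pointer that, together with len, stays inside
	 * the frame, and a len that is known, non-zero and sensibly
	 * bounded. Typical call setup used throughout this block:
	 *
	 *	r2 = 4;			// offset into the packet
	 *	r6 = r10; r6 += -8;	// 8-byte stack buffer
	 *	r3 = r6;		// to
	 *	r4 = 8;			// len
	 *	call bpf_skb_load_bytes	// r1 is still the skb ctx
	 */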
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002132 {
2133 "raw_stack: no skb_load_bytes",
2134 .insns = {
2135 BPF_MOV64_IMM(BPF_REG_2, 4),
2136 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2138 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2139 BPF_MOV64_IMM(BPF_REG_4, 8),
2140 /* Call to skb_load_bytes() omitted. */
2141 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2142 BPF_EXIT_INSN(),
2143 },
2144 .result = REJECT,
2145 .errstr = "invalid read from stack off -8+0 size 8",
2146 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2147 },
2148 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002149 "raw_stack: skb_load_bytes, negative len",
2150 .insns = {
2151 BPF_MOV64_IMM(BPF_REG_2, 4),
2152 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2154 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2155 BPF_MOV64_IMM(BPF_REG_4, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002156 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2157 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002158 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2159 BPF_EXIT_INSN(),
2160 },
2161 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002162 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2164 },
2165 {
2166 "raw_stack: skb_load_bytes, negative len 2",
2167 .insns = {
2168 BPF_MOV64_IMM(BPF_REG_2, 4),
2169 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2171 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2172 BPF_MOV64_IMM(BPF_REG_4, ~0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2174 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002175 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2176 BPF_EXIT_INSN(),
2177 },
2178 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002179 .errstr = "R4 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002180 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2181 },
2182 {
2183 "raw_stack: skb_load_bytes, zero len",
2184 .insns = {
2185 BPF_MOV64_IMM(BPF_REG_2, 4),
2186 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2188 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2189 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002190 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2191 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002192 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2193 BPF_EXIT_INSN(),
2194 },
2195 .result = REJECT,
2196 .errstr = "invalid stack type R3",
2197 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2198 },
2199 {
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002200 "raw_stack: skb_load_bytes, no init",
2201 .insns = {
2202 BPF_MOV64_IMM(BPF_REG_2, 4),
2203 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2205 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2206 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2208 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002209 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2210 BPF_EXIT_INSN(),
2211 },
2212 .result = ACCEPT,
2213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2214 },
2215 {
2216 "raw_stack: skb_load_bytes, init",
2217 .insns = {
2218 BPF_MOV64_IMM(BPF_REG_2, 4),
2219 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2221 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2222 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2223 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002224 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2225 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002226 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2227 BPF_EXIT_INSN(),
2228 },
2229 .result = ACCEPT,
2230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2231 },
2232 {
2233 "raw_stack: skb_load_bytes, spilled regs around bounds",
2234 .insns = {
2235 BPF_MOV64_IMM(BPF_REG_2, 4),
2236 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002238 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2239 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002240 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2241 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002242 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2243 BPF_FUNC_skb_load_bytes),
2244 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2245 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002246 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2247 offsetof(struct __sk_buff, mark)),
2248 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2249 offsetof(struct __sk_buff, priority)),
2250 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2251 BPF_EXIT_INSN(),
2252 },
2253 .result = ACCEPT,
2254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2255 },
2256 {
2257 "raw_stack: skb_load_bytes, spilled regs corruption",
2258 .insns = {
2259 BPF_MOV64_IMM(BPF_REG_2, 4),
2260 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002262 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002263 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2264 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002265 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2266 BPF_FUNC_skb_load_bytes),
2267 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002268 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2269 offsetof(struct __sk_buff, mark)),
2270 BPF_EXIT_INSN(),
2271 },
2272 .result = REJECT,
2273 .errstr = "R0 invalid mem access 'inv'",
2274 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2275 },
2276 {
2277 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2278 .insns = {
2279 BPF_MOV64_IMM(BPF_REG_2, 4),
2280 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002282 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2283 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2284 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002285 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2286 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002287 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2288 BPF_FUNC_skb_load_bytes),
2289 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2290 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2291 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002292 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2293 offsetof(struct __sk_buff, mark)),
2294 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2295 offsetof(struct __sk_buff, priority)),
2296 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2297 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2298 offsetof(struct __sk_buff, pkt_type)),
2299 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2300 BPF_EXIT_INSN(),
2301 },
2302 .result = REJECT,
2303 .errstr = "R3 invalid mem access 'inv'",
2304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2305 },
2306 {
2307 "raw_stack: skb_load_bytes, spilled regs + data",
2308 .insns = {
2309 BPF_MOV64_IMM(BPF_REG_2, 4),
2310 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002312 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2313 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2314 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002315 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2316 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2318 BPF_FUNC_skb_load_bytes),
2319 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2320 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2321 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002322 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2323 offsetof(struct __sk_buff, mark)),
2324 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2325 offsetof(struct __sk_buff, priority)),
2326 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2327 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2328 BPF_EXIT_INSN(),
2329 },
2330 .result = ACCEPT,
2331 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2332 },
2333 {
2334 "raw_stack: skb_load_bytes, invalid access 1",
2335 .insns = {
2336 BPF_MOV64_IMM(BPF_REG_2, 4),
2337 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2339 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2340 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002341 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2342 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002343 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2344 BPF_EXIT_INSN(),
2345 },
2346 .result = REJECT,
2347 .errstr = "invalid stack type R3 off=-513 access_size=8",
2348 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2349 },
2350 {
2351 "raw_stack: skb_load_bytes, invalid access 2",
2352 .insns = {
2353 BPF_MOV64_IMM(BPF_REG_2, 4),
2354 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2356 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2357 BPF_MOV64_IMM(BPF_REG_4, 8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002358 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2359 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002360 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2361 BPF_EXIT_INSN(),
2362 },
2363 .result = REJECT,
2364 .errstr = "invalid stack type R3 off=-1 access_size=8",
2365 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2366 },
2367 {
2368 "raw_stack: skb_load_bytes, invalid access 3",
2369 .insns = {
2370 BPF_MOV64_IMM(BPF_REG_2, 4),
2371 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2372 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2373 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2374 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2376 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2378 BPF_EXIT_INSN(),
2379 },
2380 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002381 .errstr = "R4 min value is negative",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002382 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2383 },
2384 {
2385 "raw_stack: skb_load_bytes, invalid access 4",
2386 .insns = {
2387 BPF_MOV64_IMM(BPF_REG_2, 4),
2388 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2389 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2390 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2391 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002392 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2393 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002394 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2395 BPF_EXIT_INSN(),
2396 },
2397 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002398 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002399 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2400 },
2401 {
2402 "raw_stack: skb_load_bytes, invalid access 5",
2403 .insns = {
2404 BPF_MOV64_IMM(BPF_REG_2, 4),
2405 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2407 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2408 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002409 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2410 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002411 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2412 BPF_EXIT_INSN(),
2413 },
2414 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002415 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002416 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2417 },
2418 {
2419 "raw_stack: skb_load_bytes, invalid access 6",
2420 .insns = {
2421 BPF_MOV64_IMM(BPF_REG_2, 4),
2422 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2424 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2425 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002426 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2427 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002428 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2429 BPF_EXIT_INSN(),
2430 },
2431 .result = REJECT,
2432 .errstr = "invalid stack type R3 off=-512 access_size=0",
2433 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2434 },
2435 {
2436 "raw_stack: skb_load_bytes, large access",
2437 .insns = {
2438 BPF_MOV64_IMM(BPF_REG_2, 4),
2439 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2441 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2442 BPF_MOV64_IMM(BPF_REG_4, 512),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02002443 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2444 BPF_FUNC_skb_load_bytes),
Daniel Borkmann3f2050e2016-04-13 00:10:54 +02002445 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2446 BPF_EXIT_INSN(),
2447 },
2448 .result = ACCEPT,
2449 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2450 },
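	/*
	 * Direct packet access: skb->data and skb->data_end are loaded
	 * from the context, the end of the intended access is compared
	 * against data_end, and only in the branch where the access is
	 * proven to fit may the packet bytes be read (or written, for
	 * program types that allow writes, such as tc). The basic
	 * accepted shape, as in test1 below:
	 *
	 *	r2 = *(u32 *)(r1 + offsetof(struct __sk_buff, data));
	 *	r3 = *(u32 *)(r1 + offsetof(struct __sk_buff, data_end));
	 *	r0 = r2;
	 *	r0 += 8;
	 *	if r0 > r3 goto out;	// not enough packet data
	 *	r0 = *(u8 *)(r2 + 0);	// in-bounds load
	 * out:
	 *	r0 = 0;
	 *	exit;
	 */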
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002451 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002452 "direct packet access: test1",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002453 .insns = {
2454 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2455 offsetof(struct __sk_buff, data)),
2456 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2457 offsetof(struct __sk_buff, data_end)),
2458 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2460 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 .result = ACCEPT,
2466 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2467 },
2468 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002469 "direct packet access: test2",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002470 .insns = {
2471 BPF_MOV64_IMM(BPF_REG_0, 1),
2472 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2473 offsetof(struct __sk_buff, data_end)),
2474 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2475 offsetof(struct __sk_buff, data)),
2476 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2478 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2479 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2480 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2481 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2482 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2483 offsetof(struct __sk_buff, data)),
2484 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2485 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
Edward Cree1f9ab382017-08-07 15:29:11 +01002486 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2487 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002488 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2489 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2491 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2492 offsetof(struct __sk_buff, data_end)),
2493 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2494 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2495 BPF_MOV64_IMM(BPF_REG_0, 0),
2496 BPF_EXIT_INSN(),
2497 },
2498 .result = ACCEPT,
2499 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2500 },
2501 {
Aaron Yue1633ac02016-08-11 18:17:17 -07002502 "direct packet access: test3",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002503 .insns = {
2504 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2505 offsetof(struct __sk_buff, data)),
2506 BPF_MOV64_IMM(BPF_REG_0, 0),
2507 BPF_EXIT_INSN(),
2508 },
2509 .errstr = "invalid bpf_context access off=76",
2510 .result = REJECT,
2511 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2512 },
2513 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002514 "direct packet access: test4 (write)",
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002515 .insns = {
2516 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2517 offsetof(struct __sk_buff, data)),
2518 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2519 offsetof(struct __sk_buff, data_end)),
2520 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2522 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2523 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2524 BPF_MOV64_IMM(BPF_REG_0, 0),
2525 BPF_EXIT_INSN(),
2526 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002527 .result = ACCEPT,
Alexei Starovoitov883e44e2016-05-05 19:49:15 -07002528 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2529 },
Aaron Yue1633ac02016-08-11 18:17:17 -07002530 {
Daniel Borkmann2d2be8c2016-09-08 01:03:42 +02002531 "direct packet access: test5 (pkt_end >= reg, good access)",
2532 .insns = {
2533 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2534 offsetof(struct __sk_buff, data)),
2535 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2536 offsetof(struct __sk_buff, data_end)),
2537 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2539 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2540 BPF_MOV64_IMM(BPF_REG_0, 1),
2541 BPF_EXIT_INSN(),
2542 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2543 BPF_MOV64_IMM(BPF_REG_0, 0),
2544 BPF_EXIT_INSN(),
2545 },
2546 .result = ACCEPT,
2547 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2548 },
2549 {
2550 "direct packet access: test6 (pkt_end >= reg, bad access)",
2551 .insns = {
2552 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2553 offsetof(struct __sk_buff, data)),
2554 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2555 offsetof(struct __sk_buff, data_end)),
2556 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2558 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2559 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2560 BPF_MOV64_IMM(BPF_REG_0, 1),
2561 BPF_EXIT_INSN(),
2562 BPF_MOV64_IMM(BPF_REG_0, 0),
2563 BPF_EXIT_INSN(),
2564 },
2565 .errstr = "invalid access to packet",
2566 .result = REJECT,
2567 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2568 },
2569 {
2570 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2571 .insns = {
2572 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2573 offsetof(struct __sk_buff, data)),
2574 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2575 offsetof(struct __sk_buff, data_end)),
2576 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2578 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2579 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2580 BPF_MOV64_IMM(BPF_REG_0, 1),
2581 BPF_EXIT_INSN(),
2582 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2583 BPF_MOV64_IMM(BPF_REG_0, 0),
2584 BPF_EXIT_INSN(),
2585 },
2586 .errstr = "invalid access to packet",
2587 .result = REJECT,
2588 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2589 },
2590 {
2591 "direct packet access: test8 (double test, variant 1)",
2592 .insns = {
2593 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2594 offsetof(struct __sk_buff, data)),
2595 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2596 offsetof(struct __sk_buff, data_end)),
2597 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2598 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2599 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2600 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2601 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2602 BPF_MOV64_IMM(BPF_REG_0, 1),
2603 BPF_EXIT_INSN(),
2604 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2605 BPF_MOV64_IMM(BPF_REG_0, 0),
2606 BPF_EXIT_INSN(),
2607 },
2608 .result = ACCEPT,
2609 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2610 },
2611 {
2612 "direct packet access: test9 (double test, variant 2)",
2613 .insns = {
2614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2615 offsetof(struct __sk_buff, data)),
2616 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2617 offsetof(struct __sk_buff, data_end)),
2618 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2620 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2621 BPF_MOV64_IMM(BPF_REG_0, 1),
2622 BPF_EXIT_INSN(),
2623 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2624 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2625 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2626 BPF_MOV64_IMM(BPF_REG_0, 0),
2627 BPF_EXIT_INSN(),
2628 },
2629 .result = ACCEPT,
2630 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2631 },
2632 {
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02002633 "direct packet access: test10 (write invalid)",
2634 .insns = {
2635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2636 offsetof(struct __sk_buff, data)),
2637 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2638 offsetof(struct __sk_buff, data_end)),
2639 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2641 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2642 BPF_MOV64_IMM(BPF_REG_0, 0),
2643 BPF_EXIT_INSN(),
2644 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2645 BPF_MOV64_IMM(BPF_REG_0, 0),
2646 BPF_EXIT_INSN(),
2647 },
2648 .errstr = "invalid access to packet",
2649 .result = REJECT,
2650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2651 },
2652 {
Daniel Borkmann3fadc802017-01-24 01:06:30 +01002653 "direct packet access: test11 (shift, good access)",
2654 .insns = {
2655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2656 offsetof(struct __sk_buff, data)),
2657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2658 offsetof(struct __sk_buff, data_end)),
2659 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2661 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2662 BPF_MOV64_IMM(BPF_REG_3, 144),
2663 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2665 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2666 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2667 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2668 BPF_MOV64_IMM(BPF_REG_0, 1),
2669 BPF_EXIT_INSN(),
2670 BPF_MOV64_IMM(BPF_REG_0, 0),
2671 BPF_EXIT_INSN(),
2672 },
2673 .result = ACCEPT,
2674 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2675 },
2676 {
2677 "direct packet access: test12 (and, good access)",
2678 .insns = {
2679 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2680 offsetof(struct __sk_buff, data)),
2681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2682 offsetof(struct __sk_buff, data_end)),
2683 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2685 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2686 BPF_MOV64_IMM(BPF_REG_3, 144),
2687 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2689 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2690 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2691 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2692 BPF_MOV64_IMM(BPF_REG_0, 1),
2693 BPF_EXIT_INSN(),
2694 BPF_MOV64_IMM(BPF_REG_0, 0),
2695 BPF_EXIT_INSN(),
2696 },
2697 .result = ACCEPT,
2698 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2699 },
2700 {
2701 "direct packet access: test13 (branches, good access)",
2702 .insns = {
2703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2704 offsetof(struct __sk_buff, data)),
2705 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2706 offsetof(struct __sk_buff, data_end)),
2707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2709 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2710 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2711 offsetof(struct __sk_buff, mark)),
2712 BPF_MOV64_IMM(BPF_REG_4, 1),
2713 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2714 BPF_MOV64_IMM(BPF_REG_3, 14),
2715 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2716 BPF_MOV64_IMM(BPF_REG_3, 24),
2717 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2719 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2720 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2721 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2722 BPF_MOV64_IMM(BPF_REG_0, 1),
2723 BPF_EXIT_INSN(),
2724 BPF_MOV64_IMM(BPF_REG_0, 0),
2725 BPF_EXIT_INSN(),
2726 },
2727 .result = ACCEPT,
2728 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2729 },
2730 {
William Tu63dfef72017-02-04 08:37:29 -08002731 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2732 .insns = {
2733 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2734 offsetof(struct __sk_buff, data)),
2735 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2736 offsetof(struct __sk_buff, data_end)),
2737 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2739 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2740 BPF_MOV64_IMM(BPF_REG_5, 12),
2741 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2742 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2743 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2744 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2745 BPF_MOV64_IMM(BPF_REG_0, 1),
2746 BPF_EXIT_INSN(),
2747 BPF_MOV64_IMM(BPF_REG_0, 0),
2748 BPF_EXIT_INSN(),
2749 },
2750 .result = ACCEPT,
2751 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2752 },
2753 {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02002754 "direct packet access: test15 (spill with xadd)",
2755 .insns = {
2756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2757 offsetof(struct __sk_buff, data)),
2758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2759 offsetof(struct __sk_buff, data_end)),
2760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2763 BPF_MOV64_IMM(BPF_REG_5, 4096),
2764 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2765 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2766 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2767 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2768 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2769 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2770 BPF_MOV64_IMM(BPF_REG_0, 0),
2771 BPF_EXIT_INSN(),
2772 },
2773 .errstr = "R2 invalid mem access 'inv'",
2774 .result = REJECT,
2775 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2776 },
2777 {
Daniel Borkmann728a8532017-04-27 01:39:32 +02002778 "direct packet access: test16 (arith on data_end)",
2779 .insns = {
2780 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2781 offsetof(struct __sk_buff, data)),
2782 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2783 offsetof(struct __sk_buff, data_end)),
2784 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2786 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2787 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2788 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2789 BPF_MOV64_IMM(BPF_REG_0, 0),
2790 BPF_EXIT_INSN(),
2791 },
2792 .errstr = "invalid access to packet",
2793 .result = REJECT,
2794 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2795 },
2796 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002797 "direct packet access: test17 (pruning, alignment)",
2798 .insns = {
2799 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2800 offsetof(struct __sk_buff, data)),
2801 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2802 offsetof(struct __sk_buff, data_end)),
2803 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2804 offsetof(struct __sk_buff, mark)),
2805 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2807 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2808 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2809 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2810 BPF_MOV64_IMM(BPF_REG_0, 0),
2811 BPF_EXIT_INSN(),
2812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2813 BPF_JMP_A(-6),
2814 },
Edward Creef65b1842017-08-07 15:27:12 +01002815 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
Daniel Borkmann614d0d72017-05-25 01:05:09 +02002816 .result = REJECT,
2817 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2818 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2819 },
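	/*
	 * test18..test24 also add packet pointers to scalars in the
	 * "scalar += pkt_ptr" direction; the scalar is first bounded
	 * (AND/RSH masking) so the later data_end comparison can prove
	 * the access in range. The one REJECT case in this group fails
	 * because the copied pointer ends up with no verified range
	 * (note the r=0 in its error string).
	 */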
2820 {
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002821 "direct packet access: test18 (imm += pkt_ptr, 1)",
2822 .insns = {
2823 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2824 offsetof(struct __sk_buff, data)),
2825 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2826 offsetof(struct __sk_buff, data_end)),
2827 BPF_MOV64_IMM(BPF_REG_0, 8),
2828 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2829 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2830 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2831 BPF_MOV64_IMM(BPF_REG_0, 0),
2832 BPF_EXIT_INSN(),
2833 },
2834 .result = ACCEPT,
2835 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2836 },
2837 {
2838 "direct packet access: test19 (imm += pkt_ptr, 2)",
2839 .insns = {
2840 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2841 offsetof(struct __sk_buff, data)),
2842 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2843 offsetof(struct __sk_buff, data_end)),
2844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2846 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2847 BPF_MOV64_IMM(BPF_REG_4, 4),
2848 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2849 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2850 BPF_MOV64_IMM(BPF_REG_0, 0),
2851 BPF_EXIT_INSN(),
2852 },
2853 .result = ACCEPT,
2854 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2855 },
2856 {
2857 "direct packet access: test20 (x += pkt_ptr, 1)",
2858 .insns = {
2859 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2860 offsetof(struct __sk_buff, data)),
2861 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2862 offsetof(struct __sk_buff, data_end)),
2863 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2864 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2865 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01002866 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002867 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2868 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2869 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01002870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002871 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2872 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2873 BPF_MOV64_IMM(BPF_REG_0, 0),
2874 BPF_EXIT_INSN(),
2875 },
2876 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2877 .result = ACCEPT,
2878 },
2879 {
2880 "direct packet access: test21 (x += pkt_ptr, 2)",
2881 .insns = {
2882 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2883 offsetof(struct __sk_buff, data)),
2884 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2885 offsetof(struct __sk_buff, data_end)),
2886 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2888 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2889 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2890 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2891 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01002892 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002893 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2894 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
Edward Cree1f9ab382017-08-07 15:29:11 +01002895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002896 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2897 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2898 BPF_MOV64_IMM(BPF_REG_0, 0),
2899 BPF_EXIT_INSN(),
2900 },
2901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2902 .result = ACCEPT,
2903 },
2904 {
2905 "direct packet access: test22 (x += pkt_ptr, 3)",
2906 .insns = {
2907 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2908 offsetof(struct __sk_buff, data)),
2909 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2910 offsetof(struct __sk_buff, data_end)),
2911 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2912 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2913 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
2914 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
2915 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
2916 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
2917 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
2918 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2919 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2920 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
Edward Cree1f9ab382017-08-07 15:29:11 +01002921 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002922 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2923 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
2924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
2925 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2926 BPF_MOV64_IMM(BPF_REG_2, 1),
2927 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
2928 BPF_MOV64_IMM(BPF_REG_0, 0),
2929 BPF_EXIT_INSN(),
2930 },
2931 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2932 .result = ACCEPT,
2933 },
2934 {
2935 "direct packet access: test23 (x += pkt_ptr, 4)",
2936 .insns = {
2937 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2938 offsetof(struct __sk_buff, data)),
2939 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2940 offsetof(struct __sk_buff, data_end)),
2941 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2942 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2943 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2944 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
2945 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2946 BPF_MOV64_IMM(BPF_REG_0, 31),
2947 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2948 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2949 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
2951 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2952 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2953 BPF_MOV64_IMM(BPF_REG_0, 0),
2954 BPF_EXIT_INSN(),
2955 },
2956 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2957 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01002958 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002959 },
2960 {
2961 "direct packet access: test24 (x += pkt_ptr, 5)",
2962 .insns = {
2963 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2964 offsetof(struct __sk_buff, data)),
2965 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2966 offsetof(struct __sk_buff, data_end)),
2967 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2968 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2969 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2970 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
2971 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2972 BPF_MOV64_IMM(BPF_REG_0, 64),
2973 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2974 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2975 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
Edward Cree1f9ab382017-08-07 15:29:11 +01002976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
Daniel Borkmann6d191ed42017-07-02 02:13:31 +02002977 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2978 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2979 BPF_MOV64_IMM(BPF_REG_0, 0),
2980 BPF_EXIT_INSN(),
2981 },
2982 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2983 .result = ACCEPT,
2984 },
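	/*
	 * test25..test28 repeat the bounds check with the BPF_JLT and
	 * BPF_JLE opcodes to make sure packet range marking also works
	 * for these comparisons, in both the good-access and the
	 * bad-access direction.
	 */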
2985 {
Daniel Borkmann31e482b2017-08-10 01:40:03 +02002986 "direct packet access: test25 (marking on <, good access)",
2987 .insns = {
2988 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2989 offsetof(struct __sk_buff, data)),
2990 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2991 offsetof(struct __sk_buff, data_end)),
2992 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2994 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
2995 BPF_MOV64_IMM(BPF_REG_0, 0),
2996 BPF_EXIT_INSN(),
2997 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2998 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
2999 },
3000 .result = ACCEPT,
3001 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3002 },
3003 {
3004 "direct packet access: test26 (marking on <, bad access)",
3005 .insns = {
3006 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3007 offsetof(struct __sk_buff, data)),
3008 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3009 offsetof(struct __sk_buff, data_end)),
3010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3012 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3013 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3014 BPF_MOV64_IMM(BPF_REG_0, 0),
3015 BPF_EXIT_INSN(),
3016 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3017 },
3018 .result = REJECT,
3019 .errstr = "invalid access to packet",
3020 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3021 },
3022 {
3023 "direct packet access: test27 (marking on <=, good access)",
3024 .insns = {
3025 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3026 offsetof(struct __sk_buff, data)),
3027 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3028 offsetof(struct __sk_buff, data_end)),
3029 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3031 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3032 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3033 BPF_MOV64_IMM(BPF_REG_0, 1),
3034 BPF_EXIT_INSN(),
3035 },
3036 .result = ACCEPT,
3037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3038 },
3039 {
3040 "direct packet access: test28 (marking on <=, bad access)",
3041 .insns = {
3042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3043 offsetof(struct __sk_buff, data)),
3044 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3045 offsetof(struct __sk_buff, data_end)),
3046 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3048 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3049 BPF_MOV64_IMM(BPF_REG_0, 1),
3050 BPF_EXIT_INSN(),
3051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3052 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3053 },
3054 .result = REJECT,
3055 .errstr = "invalid access to packet",
3056 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3057 },
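	/*
	 * "helper access to packet" tests 1-5: XDP programs passing packet
	 * pointers (from struct xdp_md) as bpf_map_lookup_elem()/
	 * bpf_map_update_elem() arguments. Only pointers whose range has
	 * been validated against data_end are acceptable; unchecked or
	 * insufficiently checked pointers must be rejected.
	 */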
3058 {
Aaron Yue1633ac02016-08-11 18:17:17 -07003059 "helper access to packet: test1, valid packet_ptr range",
3060 .insns = {
3061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3062 offsetof(struct xdp_md, data)),
3063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3064 offsetof(struct xdp_md, data_end)),
3065 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3067 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3068 BPF_LD_MAP_FD(BPF_REG_1, 0),
3069 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3070 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003071 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3072 BPF_FUNC_map_update_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003073 BPF_MOV64_IMM(BPF_REG_0, 0),
3074 BPF_EXIT_INSN(),
3075 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003076 .fixup_map1 = { 5 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003077 .result_unpriv = ACCEPT,
3078 .result = ACCEPT,
3079 .prog_type = BPF_PROG_TYPE_XDP,
3080 },
3081 {
3082 "helper access to packet: test2, unchecked packet_ptr",
3083 .insns = {
3084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3085 offsetof(struct xdp_md, data)),
3086 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003087 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3088 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003089 BPF_MOV64_IMM(BPF_REG_0, 0),
3090 BPF_EXIT_INSN(),
3091 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003092 .fixup_map1 = { 1 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003093 .result = REJECT,
3094 .errstr = "invalid access to packet",
3095 .prog_type = BPF_PROG_TYPE_XDP,
3096 },
3097 {
3098 "helper access to packet: test3, variable add",
3099 .insns = {
3100 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3101 offsetof(struct xdp_md, data)),
3102 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3103 offsetof(struct xdp_md, data_end)),
3104 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3105 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3106 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3107 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3108 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3109 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3110 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3112 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3113 BPF_LD_MAP_FD(BPF_REG_1, 0),
3114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003115 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3116 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003117 BPF_MOV64_IMM(BPF_REG_0, 0),
3118 BPF_EXIT_INSN(),
3119 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003120 .fixup_map1 = { 11 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003121 .result = ACCEPT,
3122 .prog_type = BPF_PROG_TYPE_XDP,
3123 },
3124 {
3125 "helper access to packet: test4, packet_ptr with bad range",
3126 .insns = {
3127 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3128 offsetof(struct xdp_md, data)),
3129 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3130 offsetof(struct xdp_md, data_end)),
3131 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3133 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3134 BPF_MOV64_IMM(BPF_REG_0, 0),
3135 BPF_EXIT_INSN(),
3136 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003137 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3138 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003139 BPF_MOV64_IMM(BPF_REG_0, 0),
3140 BPF_EXIT_INSN(),
3141 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003142 .fixup_map1 = { 7 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003143 .result = REJECT,
3144 .errstr = "invalid access to packet",
3145 .prog_type = BPF_PROG_TYPE_XDP,
3146 },
3147 {
3148 "helper access to packet: test5, packet_ptr with too short range",
3149 .insns = {
3150 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3151 offsetof(struct xdp_md, data)),
3152 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3153 offsetof(struct xdp_md, data_end)),
3154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3155 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3157 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3158 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003159 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3160 BPF_FUNC_map_lookup_elem),
Aaron Yue1633ac02016-08-11 18:17:17 -07003161 BPF_MOV64_IMM(BPF_REG_0, 0),
3162 BPF_EXIT_INSN(),
3163 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003164 .fixup_map1 = { 6 },
Aaron Yue1633ac02016-08-11 18:17:17 -07003165 .result = REJECT,
3166 .errstr = "invalid access to packet",
3167 .prog_type = BPF_PROG_TYPE_XDP,
3168 },
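	/*
	 * Tests 6-10 repeat the packet-pointer-to-helper checks above for
	 * BPF_PROG_TYPE_SCHED_CLS, where data/data_end come from
	 * struct __sk_buff rather than struct xdp_md.
	 */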
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003169 {
3170 "helper access to packet: test6, cls valid packet_ptr range",
3171 .insns = {
3172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3173 offsetof(struct __sk_buff, data)),
3174 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3175 offsetof(struct __sk_buff, data_end)),
3176 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3178 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3179 BPF_LD_MAP_FD(BPF_REG_1, 0),
3180 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3181 BPF_MOV64_IMM(BPF_REG_4, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 BPF_FUNC_map_update_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003184 BPF_MOV64_IMM(BPF_REG_0, 0),
3185 BPF_EXIT_INSN(),
3186 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003187 .fixup_map1 = { 5 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003188 .result = ACCEPT,
3189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3190 },
3191 {
3192 "helper access to packet: test7, cls unchecked packet_ptr",
3193 .insns = {
3194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3195 offsetof(struct __sk_buff, data)),
3196 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3198 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003199 BPF_MOV64_IMM(BPF_REG_0, 0),
3200 BPF_EXIT_INSN(),
3201 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003202 .fixup_map1 = { 1 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003203 .result = REJECT,
3204 .errstr = "invalid access to packet",
3205 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3206 },
3207 {
3208 "helper access to packet: test8, cls variable add",
3209 .insns = {
3210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3211 offsetof(struct __sk_buff, data)),
3212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3213 offsetof(struct __sk_buff, data_end)),
3214 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3216 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3217 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3218 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3219 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3220 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3222 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3223 BPF_LD_MAP_FD(BPF_REG_1, 0),
3224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3226 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003227 BPF_MOV64_IMM(BPF_REG_0, 0),
3228 BPF_EXIT_INSN(),
3229 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003230 .fixup_map1 = { 11 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003231 .result = ACCEPT,
3232 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3233 },
3234 {
3235 "helper access to packet: test9, cls packet_ptr with bad range",
3236 .insns = {
3237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3238 offsetof(struct __sk_buff, data)),
3239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3240 offsetof(struct __sk_buff, data_end)),
3241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3243 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3244 BPF_MOV64_IMM(BPF_REG_0, 0),
3245 BPF_EXIT_INSN(),
3246 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3248 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003249 BPF_MOV64_IMM(BPF_REG_0, 0),
3250 BPF_EXIT_INSN(),
3251 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003252 .fixup_map1 = { 7 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003253 .result = REJECT,
3254 .errstr = "invalid access to packet",
3255 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3256 },
3257 {
3258 "helper access to packet: test10, cls packet_ptr with too short range",
3259 .insns = {
3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3261 offsetof(struct __sk_buff, data)),
3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3263 offsetof(struct __sk_buff, data_end)),
3264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3265 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3267 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3268 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3270 BPF_FUNC_map_lookup_elem),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003271 BPF_MOV64_IMM(BPF_REG_0, 0),
3272 BPF_EXIT_INSN(),
3273 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003274 .fixup_map1 = { 6 },
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003275 .result = REJECT,
3276 .errstr = "invalid access to packet",
3277 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3278 },
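	/*
	 * Tests 11 and 12 pass a packet pointer as the memory argument of
	 * bpf_skb_store_bytes()/bpf_skb_load_bytes(), which is not allowed
	 * for these helpers. Tests 13 onwards use bpf_csum_diff(), which
	 * may take packet pointers, and probe its accepted offset and
	 * length ranges.
	 */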
3279 {
3280 "helper access to packet: test11, cls unsuitable helper 1",
3281 .insns = {
3282 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3283 offsetof(struct __sk_buff, data)),
3284 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3285 offsetof(struct __sk_buff, data_end)),
3286 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3287 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3289 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3290 BPF_MOV64_IMM(BPF_REG_2, 0),
3291 BPF_MOV64_IMM(BPF_REG_4, 42),
3292 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3294 BPF_FUNC_skb_store_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003295 BPF_MOV64_IMM(BPF_REG_0, 0),
3296 BPF_EXIT_INSN(),
3297 },
3298 .result = REJECT,
3299 .errstr = "helper access to the packet",
3300 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3301 },
3302 {
3303 "helper access to packet: test12, cls unsuitable helper 2",
3304 .insns = {
3305 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3306 offsetof(struct __sk_buff, data)),
3307 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3308 offsetof(struct __sk_buff, data_end)),
3309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3311 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3312 BPF_MOV64_IMM(BPF_REG_2, 0),
3313 BPF_MOV64_IMM(BPF_REG_4, 4),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3315 BPF_FUNC_skb_load_bytes),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003316 BPF_MOV64_IMM(BPF_REG_0, 0),
3317 BPF_EXIT_INSN(),
3318 },
3319 .result = REJECT,
3320 .errstr = "helper access to the packet",
3321 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3322 },
3323 {
3324 "helper access to packet: test13, cls helper ok",
3325 .insns = {
3326 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3327 offsetof(struct __sk_buff, data)),
3328 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3329 offsetof(struct __sk_buff, data_end)),
3330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3333 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3334 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3335 BPF_MOV64_IMM(BPF_REG_2, 4),
3336 BPF_MOV64_IMM(BPF_REG_3, 0),
3337 BPF_MOV64_IMM(BPF_REG_4, 0),
3338 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003339 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3340 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003341 BPF_MOV64_IMM(BPF_REG_0, 0),
3342 BPF_EXIT_INSN(),
3343 },
3344 .result = ACCEPT,
3345 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3346 },
3347 {
Edward Creef65b1842017-08-07 15:27:12 +01003348 "helper access to packet: test14, cls helper ok sub",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003349 .insns = {
3350 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3351 offsetof(struct __sk_buff, data)),
3352 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3353 offsetof(struct __sk_buff, data_end)),
3354 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3357 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3358 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3359 BPF_MOV64_IMM(BPF_REG_2, 4),
3360 BPF_MOV64_IMM(BPF_REG_3, 0),
3361 BPF_MOV64_IMM(BPF_REG_4, 0),
3362 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003363 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3364 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003365 BPF_MOV64_IMM(BPF_REG_0, 0),
3366 BPF_EXIT_INSN(),
3367 },
Edward Creef65b1842017-08-07 15:27:12 +01003368 .result = ACCEPT,
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003369 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3370 },
3371 {
Edward Creef65b1842017-08-07 15:27:12 +01003372 "helper access to packet: test15, cls helper fail sub",
3373 .insns = {
3374 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3375 offsetof(struct __sk_buff, data)),
3376 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3377 offsetof(struct __sk_buff, data_end)),
3378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3380 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3381 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3382 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3383 BPF_MOV64_IMM(BPF_REG_2, 4),
3384 BPF_MOV64_IMM(BPF_REG_3, 0),
3385 BPF_MOV64_IMM(BPF_REG_4, 0),
3386 BPF_MOV64_IMM(BPF_REG_5, 0),
3387 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3388 BPF_FUNC_csum_diff),
3389 BPF_MOV64_IMM(BPF_REG_0, 0),
3390 BPF_EXIT_INSN(),
3391 },
3392 .result = REJECT,
3393 .errstr = "invalid access to packet",
3394 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3395 },
3396 {
3397 "helper access to packet: test16, cls helper fail range 1",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003398 .insns = {
3399 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3400 offsetof(struct __sk_buff, data)),
3401 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3402 offsetof(struct __sk_buff, data_end)),
3403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3404 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3406 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3408 BPF_MOV64_IMM(BPF_REG_2, 8),
3409 BPF_MOV64_IMM(BPF_REG_3, 0),
3410 BPF_MOV64_IMM(BPF_REG_4, 0),
3411 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003412 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3413 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003414 BPF_MOV64_IMM(BPF_REG_0, 0),
3415 BPF_EXIT_INSN(),
3416 },
3417 .result = REJECT,
3418 .errstr = "invalid access to packet",
3419 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3420 },
3421 {
Edward Creef65b1842017-08-07 15:27:12 +01003422 "helper access to packet: test17, cls helper fail range 2",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003423 .insns = {
3424 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3425 offsetof(struct __sk_buff, data)),
3426 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3427 offsetof(struct __sk_buff, data_end)),
3428 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3431 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3432 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3433 BPF_MOV64_IMM(BPF_REG_2, -9),
3434 BPF_MOV64_IMM(BPF_REG_3, 0),
3435 BPF_MOV64_IMM(BPF_REG_4, 0),
3436 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003437 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3438 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003439 BPF_MOV64_IMM(BPF_REG_0, 0),
3440 BPF_EXIT_INSN(),
3441 },
3442 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003443 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003444 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3445 },
3446 {
Edward Creef65b1842017-08-07 15:27:12 +01003447 "helper access to packet: test18, cls helper fail range 3",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003448 .insns = {
3449 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3450 offsetof(struct __sk_buff, data)),
3451 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3452 offsetof(struct __sk_buff, data_end)),
3453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3456 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3457 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3458 BPF_MOV64_IMM(BPF_REG_2, ~0),
3459 BPF_MOV64_IMM(BPF_REG_3, 0),
3460 BPF_MOV64_IMM(BPF_REG_4, 0),
3461 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003462 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3463 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003464 BPF_MOV64_IMM(BPF_REG_0, 0),
3465 BPF_EXIT_INSN(),
3466 },
3467 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003468 .errstr = "R2 min value is negative",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3470 },
3471 {
Edward Creef65b1842017-08-07 15:27:12 +01003472 "helper access to packet: test19, cls helper fail range zero",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003473 .insns = {
3474 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3475 offsetof(struct __sk_buff, data)),
3476 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3477 offsetof(struct __sk_buff, data_end)),
3478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3481 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3483 BPF_MOV64_IMM(BPF_REG_2, 0),
3484 BPF_MOV64_IMM(BPF_REG_3, 0),
3485 BPF_MOV64_IMM(BPF_REG_4, 0),
3486 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003487 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3488 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003489 BPF_MOV64_IMM(BPF_REG_0, 0),
3490 BPF_EXIT_INSN(),
3491 },
3492 .result = REJECT,
3493 .errstr = "invalid access to packet",
3494 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3495 },
3496 {
Edward Creef65b1842017-08-07 15:27:12 +01003497 "helper access to packet: test20, pkt end as input",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003498 .insns = {
3499 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3500 offsetof(struct __sk_buff, data)),
3501 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3502 offsetof(struct __sk_buff, data_end)),
3503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3504 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3506 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3507 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3508 BPF_MOV64_IMM(BPF_REG_2, 4),
3509 BPF_MOV64_IMM(BPF_REG_3, 0),
3510 BPF_MOV64_IMM(BPF_REG_4, 0),
3511 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3513 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003514 BPF_MOV64_IMM(BPF_REG_0, 0),
3515 BPF_EXIT_INSN(),
3516 },
3517 .result = REJECT,
3518 .errstr = "R1 type=pkt_end expected=fp",
3519 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3520 },
3521 {
Edward Creef65b1842017-08-07 15:27:12 +01003522 "helper access to packet: test21, wrong reg",
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003523 .insns = {
3524 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3525 offsetof(struct __sk_buff, data)),
3526 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3527 offsetof(struct __sk_buff, data_end)),
3528 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3531 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3532 BPF_MOV64_IMM(BPF_REG_2, 4),
3533 BPF_MOV64_IMM(BPF_REG_3, 0),
3534 BPF_MOV64_IMM(BPF_REG_4, 0),
3535 BPF_MOV64_IMM(BPF_REG_5, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3537 BPF_FUNC_csum_diff),
Daniel Borkmann7d95b0a2016-09-20 00:26:14 +02003538 BPF_MOV64_IMM(BPF_REG_0, 0),
3539 BPF_EXIT_INSN(),
3540 },
3541 .result = REJECT,
3542 .errstr = "invalid access to packet",
3543 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3544 },
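	/*
	 * Array map bounds checking: accesses into a 48-byte map value
	 * indexed by constants, registers and (signed) variables. The valid
	 * variants either use an in-bounds constant or bounds-check the
	 * index before adding it to the map value pointer; the invalid
	 * variants use an out-of-bounds constant or an unchecked (or
	 * incorrectly checked) index.
	 */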
Josef Bacik48461132016-09-28 10:54:32 -04003545 {
3546 "valid map access into an array with a constant",
3547 .insns = {
3548 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3549 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3551 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003552 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3553 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003554 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003555 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3556 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003557 BPF_EXIT_INSN(),
3558 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003559 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003560 .errstr_unpriv = "R0 leaks addr",
3561 .result_unpriv = REJECT,
3562 .result = ACCEPT,
3563 },
3564 {
3565 "valid map access into an array with a register",
3566 .insns = {
3567 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3568 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3570 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003571 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3572 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3574 BPF_MOV64_IMM(BPF_REG_1, 4),
3575 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3576 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003577 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3578 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003579 BPF_EXIT_INSN(),
3580 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003581 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003582 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003583 .result_unpriv = REJECT,
3584 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003585 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003586 },
3587 {
3588 "valid map access into an array with a variable",
3589 .insns = {
3590 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3593 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3595 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3597 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3598 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3599 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3600 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003601 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3602 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003603 BPF_EXIT_INSN(),
3604 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003605 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003606 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003607 .result_unpriv = REJECT,
3608 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003609 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003610 },
3611 {
3612 "valid map access into an array with a signed variable",
3613 .insns = {
3614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3617 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3621 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3622 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3623 BPF_MOV32_IMM(BPF_REG_1, 0),
3624 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3625 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3626 BPF_MOV32_IMM(BPF_REG_1, 0),
3627 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3628 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003629 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3630 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003631 BPF_EXIT_INSN(),
3632 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003633 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003634 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003635 .result_unpriv = REJECT,
3636 .result = ACCEPT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003637 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003638 },
3639 {
3640 "invalid map access into an array with a constant",
3641 .insns = {
3642 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3643 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3644 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3645 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3647 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3649 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3650 offsetof(struct test_val, foo)),
3651 BPF_EXIT_INSN(),
3652 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003653 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003654 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3655 .result = REJECT,
3656 },
3657 {
3658 "invalid map access into an array with a register",
3659 .insns = {
3660 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3663 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003664 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3665 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3667 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3668 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3669 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003670 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3671 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003672 BPF_EXIT_INSN(),
3673 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003674 .fixup_map2 = { 3 },
Josef Bacik48461132016-09-28 10:54:32 -04003675 .errstr = "R0 min value is outside of the array range",
3676 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003677 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003678 },
3679 {
3680 "invalid map access into an array with a variable",
3681 .insns = {
3682 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3685 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003686 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3687 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3689 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3690 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3691 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003692 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3693 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003694 BPF_EXIT_INSN(),
3695 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003696 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003697 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
Josef Bacik48461132016-09-28 10:54:32 -04003698 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003699 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003700 },
3701 {
3702 "invalid map access into an array with no floor check",
3703 .insns = {
3704 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3705 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3706 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3707 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003708 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3709 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
Edward Creef65b1842017-08-07 15:27:12 +01003711 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
Josef Bacik48461132016-09-28 10:54:32 -04003712 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3713 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3714 BPF_MOV32_IMM(BPF_REG_1, 0),
3715 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3716 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003717 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3718 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003719 BPF_EXIT_INSN(),
3720 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003721 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003722 .errstr_unpriv = "R0 leaks addr",
3723 .errstr = "R0 unbounded memory access",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003724 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003725 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003726 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003727 },
3728 {
3729 "invalid map access into an array with a invalid max check",
3730 .insns = {
3731 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3732 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3733 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3734 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003735 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3736 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003737 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3738 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3739 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3740 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3741 BPF_MOV32_IMM(BPF_REG_1, 0),
3742 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3743 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003744 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3745 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003746 BPF_EXIT_INSN(),
3747 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003748 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003749 .errstr_unpriv = "R0 leaks addr",
Josef Bacik48461132016-09-28 10:54:32 -04003750 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003751 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003752 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003753 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003754 },
3755 {
3756 "invalid map access into an array with a invalid max check",
3757 .insns = {
3758 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3759 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3761 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003762 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3763 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003764 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3765 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3766 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3767 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3768 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3769 BPF_LD_MAP_FD(BPF_REG_1, 0),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003770 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3771 BPF_FUNC_map_lookup_elem),
Josef Bacik48461132016-09-28 10:54:32 -04003772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3773 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003774 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3775 offsetof(struct test_val, foo)),
Josef Bacik48461132016-09-28 10:54:32 -04003776 BPF_EXIT_INSN(),
3777 },
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003778 .fixup_map2 = { 3, 11 },
Edward Creef65b1842017-08-07 15:27:12 +01003779 .errstr_unpriv = "R0 pointer += pointer",
3780 .errstr = "R0 invalid mem access 'inv'",
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02003781 .result_unpriv = REJECT,
Josef Bacik48461132016-09-28 10:54:32 -04003782 .result = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003783 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacik48461132016-09-28 10:54:32 -04003784 },
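	/*
	 * bpf_map_lookup_elem() returns PTR_TO_MAP_VALUE_OR_NULL: a plain
	 * copy of the result (R4 = R0) shares the NULL check done on R0,
	 * so storing through R4 after the check is accepted.
	 */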
Thomas Graf57a09bf2016-10-18 19:51:19 +02003785 {
3786 "multiple registers share map_lookup_elem result",
3787 .insns = {
3788 BPF_MOV64_IMM(BPF_REG_1, 10),
3789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3792 BPF_LD_MAP_FD(BPF_REG_1, 0),
3793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3794 BPF_FUNC_map_lookup_elem),
3795 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3797 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3798 BPF_EXIT_INSN(),
3799 },
3800 .fixup_map1 = { 4 },
3801 .result = ACCEPT,
3802 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3803 },
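	/*
	 * ALU operations on a PTR_TO_MAP_VALUE_OR_NULL before the NULL
	 * check invalidate the pointer, even when the net effect is a
	 * no-op (+2 after -2, AND -1), so the later store through R4 is
	 * rejected.
	 */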
3804 {
Daniel Borkmann614d0d72017-05-25 01:05:09 +02003805 "alu ops on ptr_to_map_value_or_null, 1",
3806 .insns = {
3807 BPF_MOV64_IMM(BPF_REG_1, 10),
3808 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3809 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3810 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3811 BPF_LD_MAP_FD(BPF_REG_1, 0),
3812 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3813 BPF_FUNC_map_lookup_elem),
3814 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3818 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3819 BPF_EXIT_INSN(),
3820 },
3821 .fixup_map1 = { 4 },
3822 .errstr = "R4 invalid mem access",
3823 .result = REJECT,
3824 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3825 },
3826 {
3827 "alu ops on ptr_to_map_value_or_null, 2",
3828 .insns = {
3829 BPF_MOV64_IMM(BPF_REG_1, 10),
3830 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3833 BPF_LD_MAP_FD(BPF_REG_1, 0),
3834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3835 BPF_FUNC_map_lookup_elem),
3836 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3837 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3838 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3839 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3840 BPF_EXIT_INSN(),
3841 },
3842 .fixup_map1 = { 4 },
3843 .errstr = "R4 invalid mem access",
3844 .result = REJECT,
3845 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3846 },
3847 {
3848 "alu ops on ptr_to_map_value_or_null, 3",
3849 .insns = {
3850 BPF_MOV64_IMM(BPF_REG_1, 10),
3851 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3852 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3853 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3854 BPF_LD_MAP_FD(BPF_REG_1, 0),
3855 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3856 BPF_FUNC_map_lookup_elem),
3857 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3858 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3859 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3860 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3861 BPF_EXIT_INSN(),
3862 },
3863 .fixup_map1 = { 4 },
3864 .errstr = "R4 invalid mem access",
3865 .result = REJECT,
3866 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3867 },
3868 {
Thomas Graf57a09bf2016-10-18 19:51:19 +02003869 "invalid memory access with multiple map_lookup_elem calls",
3870 .insns = {
3871 BPF_MOV64_IMM(BPF_REG_1, 10),
3872 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3875 BPF_LD_MAP_FD(BPF_REG_1, 0),
3876 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3877 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3878 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3879 BPF_FUNC_map_lookup_elem),
3880 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3881 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3882 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3883 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3884 BPF_FUNC_map_lookup_elem),
3885 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3886 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3887 BPF_EXIT_INSN(),
3888 },
3889 .fixup_map1 = { 4 },
3890 .result = REJECT,
3891 .errstr = "R4 !read_ok",
3892 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3893 },
3894 {
3895 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3896 .insns = {
3897 BPF_MOV64_IMM(BPF_REG_1, 10),
3898 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3899 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3901 BPF_LD_MAP_FD(BPF_REG_1, 0),
3902 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3903 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3904 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3905 BPF_FUNC_map_lookup_elem),
3906 BPF_MOV64_IMM(BPF_REG_2, 10),
3907 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3908 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3909 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3911 BPF_FUNC_map_lookup_elem),
3912 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3913 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3914 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3915 BPF_EXIT_INSN(),
3916 },
3917 .fixup_map1 = { 4 },
3918 .result = ACCEPT,
3919 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3920 },
Josef Bacike9548902016-11-29 12:35:19 -05003921 {
3922 "invalid map access from else condition",
3923 .insns = {
3924 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3927 BPF_LD_MAP_FD(BPF_REG_1, 0),
3928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3929 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3930 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3931 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3933 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3934 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3935 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3936 BPF_EXIT_INSN(),
3937 },
3938 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01003939 .errstr = "R0 unbounded memory access",
Josef Bacike9548902016-11-29 12:35:19 -05003940 .result = REJECT,
Edward Creef65b1842017-08-07 15:27:12 +01003941 .errstr_unpriv = "R0 leaks addr",
Josef Bacike9548902016-11-29 12:35:19 -05003942 .result_unpriv = REJECT,
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02003943 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
Josef Bacike9548902016-11-29 12:35:19 -05003944 },
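	/*
	 * OR-ing known constants must keep the result a known constant so
	 * it can be checked as the size argument of bpf_probe_read()
	 * against the 48-byte stack buffer at R10-48: 34 | 13 = 47 fits,
	 * while 34 | 24 = 58 overruns the window and must be rejected.
	 */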
Gianluca Borello3c8397442016-12-03 12:31:33 -08003945 {
3946 "constant register |= constant should keep constant type",
3947 .insns = {
3948 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3949 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3950 BPF_MOV64_IMM(BPF_REG_2, 34),
3951 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3952 BPF_MOV64_IMM(BPF_REG_3, 0),
3953 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3954 BPF_EXIT_INSN(),
3955 },
3956 .result = ACCEPT,
3957 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3958 },
3959 {
3960 "constant register |= constant should not bypass stack boundary checks",
3961 .insns = {
3962 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3963 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3964 BPF_MOV64_IMM(BPF_REG_2, 34),
3965 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3966 BPF_MOV64_IMM(BPF_REG_3, 0),
3967 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3968 BPF_EXIT_INSN(),
3969 },
3970 .errstr = "invalid stack type R1 off=-48 access_size=58",
3971 .result = REJECT,
3972 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3973 },
3974 {
3975 "constant register |= constant register should keep constant type",
3976 .insns = {
3977 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3978 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3979 BPF_MOV64_IMM(BPF_REG_2, 34),
3980 BPF_MOV64_IMM(BPF_REG_4, 13),
3981 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3982 BPF_MOV64_IMM(BPF_REG_3, 0),
3983 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3984 BPF_EXIT_INSN(),
3985 },
3986 .result = ACCEPT,
3987 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3988 },
3989 {
3990 "constant register |= constant register should not bypass stack boundary checks",
3991 .insns = {
3992 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3994 BPF_MOV64_IMM(BPF_REG_2, 34),
3995 BPF_MOV64_IMM(BPF_REG_4, 24),
3996 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3997 BPF_MOV64_IMM(BPF_REG_3, 0),
3998 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3999 BPF_EXIT_INSN(),
4000 },
4001 .errstr = "invalid stack type R1 off=-48 access_size=58",
4002 .result = REJECT,
4003 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4004 },
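	/*
	 * LWT program types: BPF_PROG_TYPE_LWT_IN and LWT_OUT may read
	 * packet data directly but must not write it; only LWT_XMIT is
	 * allowed direct packet writes.
	 */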
Thomas Graf3f731d82016-12-05 10:30:52 +01004005 {
4006 "invalid direct packet write for LWT_IN",
4007 .insns = {
4008 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4009 offsetof(struct __sk_buff, data)),
4010 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4011 offsetof(struct __sk_buff, data_end)),
4012 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4014 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4015 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4016 BPF_MOV64_IMM(BPF_REG_0, 0),
4017 BPF_EXIT_INSN(),
4018 },
4019 .errstr = "cannot write into packet",
4020 .result = REJECT,
4021 .prog_type = BPF_PROG_TYPE_LWT_IN,
4022 },
4023 {
4024 "invalid direct packet write for LWT_OUT",
4025 .insns = {
4026 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4027 offsetof(struct __sk_buff, data)),
4028 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4029 offsetof(struct __sk_buff, data_end)),
4030 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4032 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4033 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4034 BPF_MOV64_IMM(BPF_REG_0, 0),
4035 BPF_EXIT_INSN(),
4036 },
4037 .errstr = "cannot write into packet",
4038 .result = REJECT,
4039 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4040 },
4041 {
4042 "direct packet write for LWT_XMIT",
4043 .insns = {
4044 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4045 offsetof(struct __sk_buff, data)),
4046 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4047 offsetof(struct __sk_buff, data_end)),
4048 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4050 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4051 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4052 BPF_MOV64_IMM(BPF_REG_0, 0),
4053 BPF_EXIT_INSN(),
4054 },
4055 .result = ACCEPT,
4056 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4057 },
4058 {
4059 "direct packet read for LWT_IN",
4060 .insns = {
4061 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4062 offsetof(struct __sk_buff, data)),
4063 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4064 offsetof(struct __sk_buff, data_end)),
4065 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4067 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4068 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4069 BPF_MOV64_IMM(BPF_REG_0, 0),
4070 BPF_EXIT_INSN(),
4071 },
4072 .result = ACCEPT,
4073 .prog_type = BPF_PROG_TYPE_LWT_IN,
4074 },
4075 {
4076 "direct packet read for LWT_OUT",
4077 .insns = {
4078 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4079 offsetof(struct __sk_buff, data)),
4080 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4081 offsetof(struct __sk_buff, data_end)),
4082 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4084 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4085 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4086 BPF_MOV64_IMM(BPF_REG_0, 0),
4087 BPF_EXIT_INSN(),
4088 },
4089 .result = ACCEPT,
4090 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4091 },
4092 {
4093 "direct packet read for LWT_XMIT",
4094 .insns = {
4095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4096 offsetof(struct __sk_buff, data)),
4097 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4098 offsetof(struct __sk_buff, data_end)),
4099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4101 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4102 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4103 BPF_MOV64_IMM(BPF_REG_0, 0),
4104 BPF_EXIT_INSN(),
4105 },
4106 .result = ACCEPT,
4107 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4108 },
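	/*
	 * Overlapping bounds checks: the 2-byte load at offset 6 is
	 * justified by the earlier data + 8 <= data_end check, so the
	 * verifier must keep that range knowledge across the second,
	 * looser data + 6 check.
	 */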
4109 {
Alexei Starovoitovb1977682017-03-24 15:57:33 -07004110 "overlapping checks for direct packet access",
4111 .insns = {
4112 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4113 offsetof(struct __sk_buff, data)),
4114 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4115 offsetof(struct __sk_buff, data_end)),
4116 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4118 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4119 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4121 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4122 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4123 BPF_MOV64_IMM(BPF_REG_0, 0),
4124 BPF_EXIT_INSN(),
4125 },
4126 .result = ACCEPT,
4127 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4128 },
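	/*
	 * skb->tc_classid is not accessible from LWT programs: reads are
	 * rejected as invalid bpf_context access for LWT_IN, LWT_OUT and
	 * LWT_XMIT alike.
	 */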
4129 {
Thomas Graf3f731d82016-12-05 10:30:52 +01004130 "invalid access of tc_classid for LWT_IN",
4131 .insns = {
4132 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4133 offsetof(struct __sk_buff, tc_classid)),
4134 BPF_EXIT_INSN(),
4135 },
4136 .result = REJECT,
4137 .errstr = "invalid bpf_context access",
4138 },
4139 {
4140 "invalid access of tc_classid for LWT_OUT",
4141 .insns = {
4142 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4143 offsetof(struct __sk_buff, tc_classid)),
4144 BPF_EXIT_INSN(),
4145 },
4146 .result = REJECT,
4147 .errstr = "invalid bpf_context access",
4148 },
4149 {
4150 "invalid access of tc_classid for LWT_XMIT",
4151 .insns = {
4152 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4153 offsetof(struct __sk_buff, tc_classid)),
4154 BPF_EXIT_INSN(),
4155 },
4156 .result = REJECT,
4157 .errstr = "invalid bpf_context access",
4158 },
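	/*
	 * Pointer leak checks: storing a map, stack or context pointer
	 * into skb->cb[] or into a map value (directly or via BPF_STX_XADD)
	 * is rejected for unprivileged users but accepted when privileged.
	 */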
Gianluca Borello57225692017-01-09 10:19:47 -08004159 {
Daniel Borkmann6bdf6ab2017-06-29 03:04:59 +02004160 "leak pointer into ctx 1",
4161 .insns = {
4162 BPF_MOV64_IMM(BPF_REG_0, 0),
4163 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4164 offsetof(struct __sk_buff, cb[0])),
4165 BPF_LD_MAP_FD(BPF_REG_2, 0),
4166 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4167 offsetof(struct __sk_buff, cb[0])),
4168 BPF_EXIT_INSN(),
4169 },
4170 .fixup_map1 = { 2 },
4171 .errstr_unpriv = "R2 leaks addr into mem",
4172 .result_unpriv = REJECT,
4173 .result = ACCEPT,
4174 },
4175 {
4176 "leak pointer into ctx 2",
4177 .insns = {
4178 BPF_MOV64_IMM(BPF_REG_0, 0),
4179 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4180 offsetof(struct __sk_buff, cb[0])),
4181 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4182 offsetof(struct __sk_buff, cb[0])),
4183 BPF_EXIT_INSN(),
4184 },
4185 .errstr_unpriv = "R10 leaks addr into mem",
4186 .result_unpriv = REJECT,
4187 .result = ACCEPT,
4188 },
4189 {
4190 "leak pointer into ctx 3",
4191 .insns = {
4192 BPF_MOV64_IMM(BPF_REG_0, 0),
4193 BPF_LD_MAP_FD(BPF_REG_2, 0),
4194 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4195 offsetof(struct __sk_buff, cb[0])),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map1 = { 1 },
4199 .errstr_unpriv = "R2 leaks addr into ctx",
4200 .result_unpriv = REJECT,
4201 .result = ACCEPT,
4202 },
4203 {
4204 "leak pointer into map val",
4205 .insns = {
4206 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4207 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4212 BPF_FUNC_map_lookup_elem),
4213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4214 BPF_MOV64_IMM(BPF_REG_3, 0),
4215 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4216 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4217 BPF_MOV64_IMM(BPF_REG_0, 0),
4218 BPF_EXIT_INSN(),
4219 },
4220 .fixup_map1 = { 4 },
4221 .errstr_unpriv = "R6 leaks addr into mem",
4222 .result_unpriv = REJECT,
4223 .result = ACCEPT,
4224 },
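	/*
	 * Map values as helper memory arguments: bpf_probe_read() with a
	 * map value pointer in R1 must keep its R2 length within the
	 * 48-byte value_size; zero, negative and oversized lengths are
	 * rejected.
	 */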
4225 {
Gianluca Borello57225692017-01-09 10:19:47 -08004226 "helper access to map: full range",
4227 .insns = {
4228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4230 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4231 BPF_LD_MAP_FD(BPF_REG_1, 0),
4232 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4233 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4234 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4235 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4236 BPF_MOV64_IMM(BPF_REG_3, 0),
4237 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4238 BPF_EXIT_INSN(),
4239 },
4240 .fixup_map2 = { 3 },
4241 .result = ACCEPT,
4242 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4243 },
4244 {
4245 "helper access to map: partial range",
4246 .insns = {
4247 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4249 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4250 BPF_LD_MAP_FD(BPF_REG_1, 0),
4251 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4252 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4254 BPF_MOV64_IMM(BPF_REG_2, 8),
4255 BPF_MOV64_IMM(BPF_REG_3, 0),
4256 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4257 BPF_EXIT_INSN(),
4258 },
4259 .fixup_map2 = { 3 },
4260 .result = ACCEPT,
4261 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4262 },
4263 {
4264 "helper access to map: empty range",
4265 .insns = {
4266 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4267 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4268 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4269 BPF_LD_MAP_FD(BPF_REG_1, 0),
4270 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4271 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4273 BPF_MOV64_IMM(BPF_REG_2, 0),
4274 BPF_MOV64_IMM(BPF_REG_3, 0),
4275 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4276 BPF_EXIT_INSN(),
4277 },
4278 .fixup_map2 = { 3 },
4279 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4280 .result = REJECT,
4281 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4282 },
4283 {
4284 "helper access to map: out-of-bound range",
4285 .insns = {
4286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4288 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4289 BPF_LD_MAP_FD(BPF_REG_1, 0),
4290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4293 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4294 BPF_MOV64_IMM(BPF_REG_3, 0),
4295 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4296 BPF_EXIT_INSN(),
4297 },
4298 .fixup_map2 = { 3 },
4299 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4300 .result = REJECT,
4301 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4302 },
4303 {
4304 "helper access to map: negative range",
4305 .insns = {
4306 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4308 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4309 BPF_LD_MAP_FD(BPF_REG_1, 0),
4310 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4311 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4313 BPF_MOV64_IMM(BPF_REG_2, -8),
4314 BPF_MOV64_IMM(BPF_REG_3, 0),
4315 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4316 BPF_EXIT_INSN(),
4317 },
4318 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004319 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004320 .result = REJECT,
4321 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4322 },
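	/*
	 * Same checks after advancing the map value pointer by a constant
	 * immediate, offsetof(struct test_val, foo): the remaining window
	 * shrinks by that offset.
	 */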
4323 {
4324 "helper access to adjusted map (via const imm): full range",
4325 .insns = {
4326 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4328 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4329 BPF_LD_MAP_FD(BPF_REG_1, 0),
4330 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4331 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4332 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4334 offsetof(struct test_val, foo)),
4335 BPF_MOV64_IMM(BPF_REG_2,
4336 sizeof(struct test_val) -
4337 offsetof(struct test_val, foo)),
4338 BPF_MOV64_IMM(BPF_REG_3, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4340 BPF_EXIT_INSN(),
4341 },
4342 .fixup_map2 = { 3 },
4343 .result = ACCEPT,
4344 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4345 },
4346 {
4347 "helper access to adjusted map (via const imm): partial range",
4348 .insns = {
4349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4351 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4352 BPF_LD_MAP_FD(BPF_REG_1, 0),
4353 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4356 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4357 offsetof(struct test_val, foo)),
4358 BPF_MOV64_IMM(BPF_REG_2, 8),
4359 BPF_MOV64_IMM(BPF_REG_3, 0),
4360 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4361 BPF_EXIT_INSN(),
4362 },
4363 .fixup_map2 = { 3 },
4364 .result = ACCEPT,
4365 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4366 },
4367 {
4368 "helper access to adjusted map (via const imm): empty range",
4369 .insns = {
4370 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4372 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4373 BPF_LD_MAP_FD(BPF_REG_1, 0),
4374 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4375 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4376 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4378 offsetof(struct test_val, foo)),
4379 BPF_MOV64_IMM(BPF_REG_2, 0),
4380 BPF_MOV64_IMM(BPF_REG_3, 0),
4381 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4382 BPF_EXIT_INSN(),
4383 },
4384 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004385 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
Gianluca Borello57225692017-01-09 10:19:47 -08004386 .result = REJECT,
4387 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4388 },
4389 {
4390 "helper access to adjusted map (via const imm): out-of-bound range",
4391 .insns = {
4392 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4394 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4395 BPF_LD_MAP_FD(BPF_REG_1, 0),
4396 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4397 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4398 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4399 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4400 offsetof(struct test_val, foo)),
4401 BPF_MOV64_IMM(BPF_REG_2,
4402 sizeof(struct test_val) -
4403 offsetof(struct test_val, foo) + 8),
4404 BPF_MOV64_IMM(BPF_REG_3, 0),
4405 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4406 BPF_EXIT_INSN(),
4407 },
4408 .fixup_map2 = { 3 },
4409 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4410 .result = REJECT,
4411 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4412 },
4413 {
4414 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4415 .insns = {
4416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4418 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4419 BPF_LD_MAP_FD(BPF_REG_1, 0),
4420 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4421 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4422 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4423 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4424 offsetof(struct test_val, foo)),
4425 BPF_MOV64_IMM(BPF_REG_2, -8),
4426 BPF_MOV64_IMM(BPF_REG_3, 0),
4427 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4428 BPF_EXIT_INSN(),
4429 },
4430 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004431 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004432 .result = REJECT,
4433 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4434 },
4435 {
4436 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4437 .insns = {
4438 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4440 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4441 BPF_LD_MAP_FD(BPF_REG_1, 0),
4442 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4443 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4446 offsetof(struct test_val, foo)),
4447 BPF_MOV64_IMM(BPF_REG_2, -1),
4448 BPF_MOV64_IMM(BPF_REG_3, 0),
4449 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4450 BPF_EXIT_INSN(),
4451 },
4452 .fixup_map2 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01004453 .errstr = "R2 min value is negative",
Gianluca Borello57225692017-01-09 10:19:47 -08004454 .result = REJECT,
4455 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4456 },
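	/*
	 * As above, but the offset is added from a register holding a
	 * known constant rather than as an immediate.
	 */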
4457 {
4458 "helper access to adjusted map (via const reg): full range",
4459 .insns = {
4460 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4462 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4463 BPF_LD_MAP_FD(BPF_REG_1, 0),
4464 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4465 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4466 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4467 BPF_MOV64_IMM(BPF_REG_3,
4468 offsetof(struct test_val, foo)),
4469 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4470 BPF_MOV64_IMM(BPF_REG_2,
4471 sizeof(struct test_val) -
4472 offsetof(struct test_val, foo)),
4473 BPF_MOV64_IMM(BPF_REG_3, 0),
4474 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4475 BPF_EXIT_INSN(),
4476 },
4477 .fixup_map2 = { 3 },
4478 .result = ACCEPT,
4479 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4480 },
4481 {
4482 "helper access to adjusted map (via const reg): partial range",
4483 .insns = {
4484 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4486 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4487 BPF_LD_MAP_FD(BPF_REG_1, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4489 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4490 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4491 BPF_MOV64_IMM(BPF_REG_3,
4492 offsetof(struct test_val, foo)),
4493 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4494 BPF_MOV64_IMM(BPF_REG_2, 8),
4495 BPF_MOV64_IMM(BPF_REG_3, 0),
4496 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4497 BPF_EXIT_INSN(),
4498 },
4499 .fixup_map2 = { 3 },
4500 .result = ACCEPT,
4501 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4502 },
4503 {
4504 "helper access to adjusted map (via const reg): empty range",
4505 .insns = {
4506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4508 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4509 BPF_LD_MAP_FD(BPF_REG_1, 0),
4510 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4513 BPF_MOV64_IMM(BPF_REG_3, 0),
4514 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4515 BPF_MOV64_IMM(BPF_REG_2, 0),
4516 BPF_MOV64_IMM(BPF_REG_3, 0),
4517 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4518 BPF_EXIT_INSN(),
4519 },
4520 .fixup_map2 = { 3 },
4521 .errstr = "R1 min value is outside of the array range",
4522 .result = REJECT,
4523 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4524 },
4525 {
4526 "helper access to adjusted map (via const reg): out-of-bound range",
4527 .insns = {
4528 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4529 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4530 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4531 BPF_LD_MAP_FD(BPF_REG_1, 0),
4532 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4533 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4534 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4535 BPF_MOV64_IMM(BPF_REG_3,
4536 offsetof(struct test_val, foo)),
4537 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4538 BPF_MOV64_IMM(BPF_REG_2,
4539 sizeof(struct test_val) -
4540 offsetof(struct test_val, foo) + 8),
4541 BPF_MOV64_IMM(BPF_REG_3, 0),
4542 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4543 BPF_EXIT_INSN(),
4544 },
4545 .fixup_map2 = { 3 },
4546 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4547 .result = REJECT,
4548 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4549 },
4550 {
4551 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4552 .insns = {
4553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4555 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4556 BPF_LD_MAP_FD(BPF_REG_1, 0),
4557 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4559 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4560 BPF_MOV64_IMM(BPF_REG_3,
4561 offsetof(struct test_val, foo)),
4562 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4563 BPF_MOV64_IMM(BPF_REG_2, -8),
4564 BPF_MOV64_IMM(BPF_REG_3, 0),
4565 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4566 BPF_EXIT_INSN(),
4567 },
4568 .fixup_map2 = { 3 },
4569 .errstr = "R2 min value is negative",
4570 .result = REJECT,
4571 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4572 },
4573 {
4574 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4575 .insns = {
4576 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4577 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4578 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4579 BPF_LD_MAP_FD(BPF_REG_1, 0),
4580 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4582 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4583 BPF_MOV64_IMM(BPF_REG_3,
4584 offsetof(struct test_val, foo)),
4585 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4586 BPF_MOV64_IMM(BPF_REG_2, -1),
4587 BPF_MOV64_IMM(BPF_REG_3, 0),
4588 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4589 BPF_EXIT_INSN(),
4590 },
4591 .fixup_map2 = { 3 },
4592 .errstr = "R2 min value is negative",
4593 .result = REJECT,
4594 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4595 },
4596 {
4597 "helper access to adjusted map (via variable): full range",
4598 .insns = {
4599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4601 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4602 BPF_LD_MAP_FD(BPF_REG_1, 0),
4603 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4604 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4606 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4607 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4608 offsetof(struct test_val, foo), 4),
4609 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4610 BPF_MOV64_IMM(BPF_REG_2,
4611 sizeof(struct test_val) -
4612 offsetof(struct test_val, foo)),
4613 BPF_MOV64_IMM(BPF_REG_3, 0),
4614 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4615 BPF_EXIT_INSN(),
4616 },
4617 .fixup_map2 = { 3 },
4618 .result = ACCEPT,
4619 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4620 },
4621 {
4622 "helper access to adjusted map (via variable): partial range",
4623 .insns = {
4624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4626 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4627 BPF_LD_MAP_FD(BPF_REG_1, 0),
4628 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4630 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4631 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4632 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4633 offsetof(struct test_val, foo), 4),
4634 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4635 BPF_MOV64_IMM(BPF_REG_2, 8),
4636 BPF_MOV64_IMM(BPF_REG_3, 0),
4637 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4638 BPF_EXIT_INSN(),
4639 },
4640 .fixup_map2 = { 3 },
4641 .result = ACCEPT,
4642 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4643 },
4644 {
4645 "helper access to adjusted map (via variable): empty range",
4646 .insns = {
4647 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4649 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4650 BPF_LD_MAP_FD(BPF_REG_1, 0),
4651 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4653 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4654 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4655 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4656 offsetof(struct test_val, foo), 4),
4657 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4658 BPF_MOV64_IMM(BPF_REG_2, 0),
4659 BPF_MOV64_IMM(BPF_REG_3, 0),
4660 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4661 BPF_EXIT_INSN(),
4662 },
4663 .fixup_map2 = { 3 },
4664 .errstr = "R1 min value is outside of the array range",
4665 .result = REJECT,
4666 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4667 },
4668 {
4669 "helper access to adjusted map (via variable): no max check",
4670 .insns = {
4671 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4673 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4674 BPF_LD_MAP_FD(BPF_REG_1, 0),
4675 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4677 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4678 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4679 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4680 BPF_MOV64_IMM(BPF_REG_2, 1),
4681 BPF_MOV64_IMM(BPF_REG_3, 0),
4682 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4683 BPF_EXIT_INSN(),
4684 },
4685 .fixup_map2 = { 3 },
4686 .errstr = "R1 unbounded memory access",
4687 .result = REJECT,
4688 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4689 },
4690 {
4691 "helper access to adjusted map (via variable): wrong max check",
4692 .insns = {
4693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4695 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4696 BPF_LD_MAP_FD(BPF_REG_1, 0),
4697 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4700 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4701 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4702 offsetof(struct test_val, foo), 4),
4703 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4704 BPF_MOV64_IMM(BPF_REG_2,
4705 sizeof(struct test_val) -
4706 offsetof(struct test_val, foo) + 1),
4707 BPF_MOV64_IMM(BPF_REG_3, 0),
4708 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4709 BPF_EXIT_INSN(),
4710 },
4711 .fixup_map2 = { 3 },
4712 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4713 .result = REJECT,
4714 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4715 },
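/* Bounds checks using the <, <=, s< and s<= jump variants: the "good access"
 * cases constrain the loaded index on both ends before adding it to the map
 * value pointer, while the "bad access" cases leave it unbounded or possibly
 * negative (64-bit load) and must be rejected.
 */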
4716 {
4717 "helper access to map: bounds check using <, good access",
4718 .insns = {
4719 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4721 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4722 BPF_LD_MAP_FD(BPF_REG_1, 0),
4723 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4724 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4725 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4726 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4727 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4728 BPF_MOV64_IMM(BPF_REG_0, 0),
4729 BPF_EXIT_INSN(),
4730 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4731 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4732 BPF_MOV64_IMM(BPF_REG_0, 0),
4733 BPF_EXIT_INSN(),
4734 },
4735 .fixup_map2 = { 3 },
4736 .result = ACCEPT,
4737 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4738 },
4739 {
4740 "helper access to map: bounds check using <, bad access",
4741 .insns = {
4742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4744 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4745 BPF_LD_MAP_FD(BPF_REG_1, 0),
4746 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4748 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4749 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4750 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4751 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4752 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4753 BPF_MOV64_IMM(BPF_REG_0, 0),
4754 BPF_EXIT_INSN(),
4755 BPF_MOV64_IMM(BPF_REG_0, 0),
4756 BPF_EXIT_INSN(),
4757 },
4758 .fixup_map2 = { 3 },
4759 .result = REJECT,
4760 .errstr = "R1 unbounded memory access",
4761 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4762 },
4763 {
4764 "helper access to map: bounds check using <=, good access",
4765 .insns = {
4766 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4768 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4769 BPF_LD_MAP_FD(BPF_REG_1, 0),
4770 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4772 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4773 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4774 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4775 BPF_MOV64_IMM(BPF_REG_0, 0),
4776 BPF_EXIT_INSN(),
4777 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4778 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4779 BPF_MOV64_IMM(BPF_REG_0, 0),
4780 BPF_EXIT_INSN(),
4781 },
4782 .fixup_map2 = { 3 },
4783 .result = ACCEPT,
4784 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4785 },
4786 {
4787 "helper access to map: bounds check using <=, bad access",
4788 .insns = {
4789 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4790 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4791 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4792 BPF_LD_MAP_FD(BPF_REG_1, 0),
4793 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4795 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4796 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4797 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4798 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4799 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4800 BPF_MOV64_IMM(BPF_REG_0, 0),
4801 BPF_EXIT_INSN(),
4802 BPF_MOV64_IMM(BPF_REG_0, 0),
4803 BPF_EXIT_INSN(),
4804 },
4805 .fixup_map2 = { 3 },
4806 .result = REJECT,
4807 .errstr = "R1 unbounded memory access",
4808 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4809 },
4810 {
4811 "helper access to map: bounds check using s<, good access",
4812 .insns = {
4813 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4814 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4815 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4816 BPF_LD_MAP_FD(BPF_REG_1, 0),
4817 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4818 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4819 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4820 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4821 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4822 BPF_MOV64_IMM(BPF_REG_0, 0),
4823 BPF_EXIT_INSN(),
4824 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4825 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4826 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4827 BPF_MOV64_IMM(BPF_REG_0, 0),
4828 BPF_EXIT_INSN(),
4829 },
4830 .fixup_map2 = { 3 },
4831 .result = ACCEPT,
4832 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4833 },
4834 {
4835 "helper access to map: bounds check using s<, good access 2",
4836 .insns = {
4837 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4840 BPF_LD_MAP_FD(BPF_REG_1, 0),
4841 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4842 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4843 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4844 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4845 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4846 BPF_MOV64_IMM(BPF_REG_0, 0),
4847 BPF_EXIT_INSN(),
4848 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4849 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4850 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4851 BPF_MOV64_IMM(BPF_REG_0, 0),
4852 BPF_EXIT_INSN(),
4853 },
4854 .fixup_map2 = { 3 },
4855 .result = ACCEPT,
4856 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4857 },
4858 {
4859 "helper access to map: bounds check using s<, bad access",
4860 .insns = {
4861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4863 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4864 BPF_LD_MAP_FD(BPF_REG_1, 0),
4865 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4868 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4869 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4870 BPF_MOV64_IMM(BPF_REG_0, 0),
4871 BPF_EXIT_INSN(),
4872 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4873 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4874 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4875 BPF_MOV64_IMM(BPF_REG_0, 0),
4876 BPF_EXIT_INSN(),
4877 },
4878 .fixup_map2 = { 3 },
4879 .result = REJECT,
4880 .errstr = "R1 min value is negative",
4881 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4882 },
4883 {
4884 "helper access to map: bounds check using s<=, good access",
4885 .insns = {
4886 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4888 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4889 BPF_LD_MAP_FD(BPF_REG_1, 0),
4890 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4891 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4892 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4894 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4895 BPF_MOV64_IMM(BPF_REG_0, 0),
4896 BPF_EXIT_INSN(),
4897 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
4898 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4899 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4900 BPF_MOV64_IMM(BPF_REG_0, 0),
4901 BPF_EXIT_INSN(),
4902 },
4903 .fixup_map2 = { 3 },
4904 .result = ACCEPT,
4905 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4906 },
4907 {
4908 "helper access to map: bounds check using s<=, good access 2",
4909 .insns = {
4910 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4911 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4912 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4913 BPF_LD_MAP_FD(BPF_REG_1, 0),
4914 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4915 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4916 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4917 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4918 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4919 BPF_MOV64_IMM(BPF_REG_0, 0),
4920 BPF_EXIT_INSN(),
4921 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4922 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4923 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4924 BPF_MOV64_IMM(BPF_REG_0, 0),
4925 BPF_EXIT_INSN(),
4926 },
4927 .fixup_map2 = { 3 },
4928 .result = ACCEPT,
4929 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4930 },
4931 {
4932 "helper access to map: bounds check using s<=, bad access",
4933 .insns = {
4934 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4935 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4936 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4937 BPF_LD_MAP_FD(BPF_REG_1, 0),
4938 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4939 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4940 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4941 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4942 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4943 BPF_MOV64_IMM(BPF_REG_0, 0),
4944 BPF_EXIT_INSN(),
4945 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4946 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4947 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4948 BPF_MOV64_IMM(BPF_REG_0, 0),
4949 BPF_EXIT_INSN(),
4950 },
4951 .fixup_map2 = { 3 },
4952 .result = REJECT,
4953 .errstr = "R1 min value is negative",
4954 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4955 },
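/* Register spilling: a map value pointer (or map_value_or_null) written to an
 * aligned stack slot keeps its type, so it can be filled back and dereferenced;
 * unprivileged loads are still expected to fail with "R0 leaks addr".
 */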
4956 {
4957 "map element value is preserved across register spilling",
4958 .insns = {
4959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4961 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4962 BPF_LD_MAP_FD(BPF_REG_1, 0),
4963 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4965 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4968 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4969 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4971 BPF_EXIT_INSN(),
4972 },
4973 .fixup_map2 = { 3 },
4974 .errstr_unpriv = "R0 leaks addr",
4975 .result = ACCEPT,
4976 .result_unpriv = REJECT,
4977 },
4978 {
4979 "map element value or null is marked on register spilling",
4980 .insns = {
4981 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4982 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4983 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4984 BPF_LD_MAP_FD(BPF_REG_1, 0),
4985 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4986 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4988 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4989 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4990 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4991 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4992 BPF_EXIT_INSN(),
4993 },
4994 .fixup_map2 = { 3 },
4995 .errstr_unpriv = "R0 leaks addr",
4996 .result = ACCEPT,
4997 .result_unpriv = REJECT,
4998 },
4999 {
5000 "map element value store of cleared call register",
5001 .insns = {
5002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5004 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5005 BPF_LD_MAP_FD(BPF_REG_1, 0),
5006 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5008 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5009 BPF_EXIT_INSN(),
5010 },
5011 .fixup_map2 = { 3 },
5012 .errstr_unpriv = "R1 !read_ok",
5013 .errstr = "R1 !read_ok",
5014 .result = REJECT,
5015 .result_unpriv = REJECT,
5016 },
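/* Unaligned stores/loads within a map value are only accepted when the host
 * has efficient unaligned access, hence F_NEEDS_EFFICIENT_UNALIGNED_ACCESS on
 * the two tests below.
 */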
5017 {
5018 "map element value with unaligned store",
5019 .insns = {
5020 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5022 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5023 BPF_LD_MAP_FD(BPF_REG_1, 0),
5024 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5025 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5026 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5027 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5028 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5029 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5030 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5031 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5032 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5033 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5035 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5036 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5037 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5038 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5040 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5041 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5042 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5043 BPF_EXIT_INSN(),
5044 },
5045 .fixup_map2 = { 3 },
5046 .errstr_unpriv = "R0 leaks addr",
5047 .result = ACCEPT,
5048 .result_unpriv = REJECT,
5049 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5050 },
5051 {
5052 "map element value with unaligned load",
5053 .insns = {
5054 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5056 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5057 BPF_LD_MAP_FD(BPF_REG_1, 0),
5058 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5059 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5060 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5061 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5062 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5063 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5064 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5065 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5066 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5067 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5069 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5070 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5071 BPF_EXIT_INSN(),
5072 },
5073 .fixup_map2 = { 3 },
5074 .errstr_unpriv = "R0 leaks addr",
5075 .result = ACCEPT,
5076 .result_unpriv = REJECT,
5077 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5078 },
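/* ALU operations that are not plain 64-bit add/sub on a map value pointer
 * (bitwise AND, 32-bit arithmetic, division, byte swap, xadd through a spilled
 * pointer) turn the register into an unknown scalar, so the subsequent memory
 * access is rejected as "invalid mem access 'inv'".
 */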
5079 {
5080 "map element value illegal alu op, 1",
5081 .insns = {
5082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5084 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5085 BPF_LD_MAP_FD(BPF_REG_1, 0),
5086 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5088 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5089 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5090 BPF_EXIT_INSN(),
5091 },
5092 .fixup_map2 = { 3 },
5093 .errstr_unpriv = "R0 bitwise operator &= on pointer",
5094 .errstr = "invalid mem access 'inv'",
5095 .result = REJECT,
5096 .result_unpriv = REJECT,
5097 },
5098 {
5099 "map element value illegal alu op, 2",
5100 .insns = {
5101 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5103 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5104 BPF_LD_MAP_FD(BPF_REG_1, 0),
5105 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5106 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5107 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5108 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5109 BPF_EXIT_INSN(),
5110 },
5111 .fixup_map2 = { 3 },
5112 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5113 .errstr = "invalid mem access 'inv'",
5114 .result = REJECT,
5115 .result_unpriv = REJECT,
5116 },
5117 {
5118 "map element value illegal alu op, 3",
5119 .insns = {
5120 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5122 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5123 BPF_LD_MAP_FD(BPF_REG_1, 0),
5124 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5125 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5126 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5127 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5128 BPF_EXIT_INSN(),
5129 },
5130 .fixup_map2 = { 3 },
5131 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
5132 .errstr = "invalid mem access 'inv'",
5133 .result = REJECT,
5134 .result_unpriv = REJECT,
5135 },
5136 {
5137 "map element value illegal alu op, 4",
5138 .insns = {
5139 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5141 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5142 BPF_LD_MAP_FD(BPF_REG_1, 0),
5143 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5144 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5145 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5146 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5147 BPF_EXIT_INSN(),
5148 },
5149 .fixup_map2 = { 3 },
5150 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5151 .errstr = "invalid mem access 'inv'",
5152 .result = REJECT,
5153 .result_unpriv = REJECT,
5154 },
5155 {
5156 "map element value illegal alu op, 5",
5157 .insns = {
5158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5160 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5161 BPF_LD_MAP_FD(BPF_REG_1, 0),
5162 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5163 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5164 BPF_MOV64_IMM(BPF_REG_3, 4096),
5165 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5167 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5168 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5169 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5170 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5171 BPF_EXIT_INSN(),
5172 },
5173 .fixup_map2 = { 3 },
5174 .errstr = "R0 invalid mem access 'inv'",
5175 .result = REJECT,
5176 },
5177 {
5178 "map element value is preserved across register spilling",
5179 .insns = {
5180 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5182 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5183 BPF_LD_MAP_FD(BPF_REG_1, 0),
5184 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5185 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5187 offsetof(struct test_val, foo)),
5188 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5191 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5192 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5193 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5194 BPF_EXIT_INSN(),
5195 },
5196 .fixup_map2 = { 3 },
5197 .errstr_unpriv = "R0 leaks addr",
5198 .result = ACCEPT,
5199 .result_unpriv = REJECT,
5200 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5201 },
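/* "Variable memory" tests: the size passed to bpf_probe_read() in R2 is only
 * known at run time, so it must be bounded first, either by ANDing with a
 * constant or by explicit conditional jumps, and the whole stack range being
 * read must have been initialized.
 */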
5202 {
5203 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5204 .insns = {
5205 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5206 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5207 BPF_MOV64_IMM(BPF_REG_0, 0),
5208 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5209 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5210 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5211 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5212 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5213 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5214 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5215 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5216 BPF_MOV64_IMM(BPF_REG_2, 16),
5217 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5218 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5219 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5220 BPF_MOV64_IMM(BPF_REG_4, 0),
5221 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5222 BPF_MOV64_IMM(BPF_REG_3, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5224 BPF_MOV64_IMM(BPF_REG_0, 0),
5225 BPF_EXIT_INSN(),
5226 },
5227 .result = ACCEPT,
5228 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5229 },
5230 {
5231 "helper access to variable memory: stack, bitwise AND, zero included",
5232 .insns = {
5233 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5235 BPF_MOV64_IMM(BPF_REG_2, 16),
5236 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5237 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5238 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5239 BPF_MOV64_IMM(BPF_REG_3, 0),
5240 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5241 BPF_EXIT_INSN(),
5242 },
5243 .errstr = "invalid stack type R1 off=-64 access_size=0",
5244 .result = REJECT,
5245 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5246 },
5247 {
5248 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5249 .insns = {
5250 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5252 BPF_MOV64_IMM(BPF_REG_2, 16),
5253 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5254 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5255 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5256 BPF_MOV64_IMM(BPF_REG_4, 0),
5257 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5258 BPF_MOV64_IMM(BPF_REG_3, 0),
5259 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5260 BPF_MOV64_IMM(BPF_REG_0, 0),
5261 BPF_EXIT_INSN(),
5262 },
5263 .errstr = "invalid stack type R1 off=-64 access_size=65",
5264 .result = REJECT,
5265 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5266 },
5267 {
5268 "helper access to variable memory: stack, JMP, correct bounds",
5269 .insns = {
5270 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5272 BPF_MOV64_IMM(BPF_REG_0, 0),
5273 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5274 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5275 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5276 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5277 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5278 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5279 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5280 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5281 BPF_MOV64_IMM(BPF_REG_2, 16),
5282 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5283 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5284 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5285 BPF_MOV64_IMM(BPF_REG_4, 0),
5286 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5287 BPF_MOV64_IMM(BPF_REG_3, 0),
5288 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5289 BPF_MOV64_IMM(BPF_REG_0, 0),
5290 BPF_EXIT_INSN(),
5291 },
5292 .result = ACCEPT,
5293 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5294 },
5295 {
5296 "helper access to variable memory: stack, JMP (signed), correct bounds",
5297 .insns = {
5298 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5299 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5300 BPF_MOV64_IMM(BPF_REG_0, 0),
5301 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5302 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5303 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5304 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5305 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5306 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5307 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5308 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5309 BPF_MOV64_IMM(BPF_REG_2, 16),
5310 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5311 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5312 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5313 BPF_MOV64_IMM(BPF_REG_4, 0),
5314 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5315 BPF_MOV64_IMM(BPF_REG_3, 0),
5316 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5317 BPF_MOV64_IMM(BPF_REG_0, 0),
5318 BPF_EXIT_INSN(),
5319 },
5320 .result = ACCEPT,
5321 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5322 },
5323 {
5324 "helper access to variable memory: stack, JMP, bounds + offset",
5325 .insns = {
5326 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5328 BPF_MOV64_IMM(BPF_REG_2, 16),
5329 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5330 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5331 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5332 BPF_MOV64_IMM(BPF_REG_4, 0),
5333 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5334 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5335 BPF_MOV64_IMM(BPF_REG_3, 0),
5336 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5337 BPF_MOV64_IMM(BPF_REG_0, 0),
5338 BPF_EXIT_INSN(),
5339 },
5340 .errstr = "invalid stack type R1 off=-64 access_size=65",
5341 .result = REJECT,
5342 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5343 },
5344 {
5345 "helper access to variable memory: stack, JMP, wrong max",
5346 .insns = {
5347 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5349 BPF_MOV64_IMM(BPF_REG_2, 16),
5350 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5351 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5352 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5353 BPF_MOV64_IMM(BPF_REG_4, 0),
5354 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5355 BPF_MOV64_IMM(BPF_REG_3, 0),
5356 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5357 BPF_MOV64_IMM(BPF_REG_0, 0),
5358 BPF_EXIT_INSN(),
5359 },
5360 .errstr = "invalid stack type R1 off=-64 access_size=65",
5361 .result = REJECT,
5362 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5363 },
5364 {
5365 "helper access to variable memory: stack, JMP, no max check",
5366 .insns = {
5367 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5369 BPF_MOV64_IMM(BPF_REG_2, 16),
5370 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5371 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5372 BPF_MOV64_IMM(BPF_REG_4, 0),
5373 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5374 BPF_MOV64_IMM(BPF_REG_3, 0),
5375 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5376 BPF_MOV64_IMM(BPF_REG_0, 0),
5377 BPF_EXIT_INSN(),
5378 },
5379 /* because max wasn't checked, signed min is negative */
5380 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5381 .result = REJECT,
5382 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5383 },
5384 {
5385 "helper access to variable memory: stack, JMP, no min check",
5386 .insns = {
5387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5389 BPF_MOV64_IMM(BPF_REG_2, 16),
5390 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5391 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5392 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5393 BPF_MOV64_IMM(BPF_REG_3, 0),
5394 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5395 BPF_MOV64_IMM(BPF_REG_0, 0),
5396 BPF_EXIT_INSN(),
5397 },
5398 .errstr = "invalid stack type R1 off=-64 access_size=0",
5399 .result = REJECT,
5400 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5401 },
5402 {
5403 "helper access to variable memory: stack, JMP (signed), no min check",
5404 .insns = {
5405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5407 BPF_MOV64_IMM(BPF_REG_2, 16),
5408 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5409 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5410 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5411 BPF_MOV64_IMM(BPF_REG_3, 0),
5412 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5413 BPF_MOV64_IMM(BPF_REG_0, 0),
5414 BPF_EXIT_INSN(),
5415 },
5416 .errstr = "R2 min value is negative",
5417 .result = REJECT,
5418 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5419 },
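/* Same checks with a map value as the source buffer: the run-time size in R2
 * must not allow reads beyond the 48-byte value, also when the pointer was
 * first advanced by a constant offset.
 */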
5420 {
5421 "helper access to variable memory: map, JMP, correct bounds",
5422 .insns = {
5423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5425 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5426 BPF_LD_MAP_FD(BPF_REG_1, 0),
5427 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5430 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5431 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5432 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5433 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5434 sizeof(struct test_val), 4),
5435 BPF_MOV64_IMM(BPF_REG_4, 0),
5436 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5437 BPF_MOV64_IMM(BPF_REG_3, 0),
5438 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5439 BPF_MOV64_IMM(BPF_REG_0, 0),
5440 BPF_EXIT_INSN(),
5441 },
5442 .fixup_map2 = { 3 },
5443 .result = ACCEPT,
5444 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5445 },
5446 {
5447 "helper access to variable memory: map, JMP, wrong max",
5448 .insns = {
5449 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5451 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5452 BPF_LD_MAP_FD(BPF_REG_1, 0),
5453 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5454 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5455 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5456 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5457 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5458 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5459 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5460 sizeof(struct test_val) + 1, 4),
5461 BPF_MOV64_IMM(BPF_REG_4, 0),
5462 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5463 BPF_MOV64_IMM(BPF_REG_3, 0),
5464 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5465 BPF_MOV64_IMM(BPF_REG_0, 0),
5466 BPF_EXIT_INSN(),
5467 },
5468 .fixup_map2 = { 3 },
5469 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5470 .result = REJECT,
5471 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5472 },
5473 {
5474 "helper access to variable memory: map adjusted, JMP, correct bounds",
5475 .insns = {
5476 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5478 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5479 BPF_LD_MAP_FD(BPF_REG_1, 0),
5480 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5481 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5482 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5484 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5485 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5486 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5487 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5488 sizeof(struct test_val) - 20, 4),
5489 BPF_MOV64_IMM(BPF_REG_4, 0),
5490 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5491 BPF_MOV64_IMM(BPF_REG_3, 0),
5492 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5493 BPF_MOV64_IMM(BPF_REG_0, 0),
5494 BPF_EXIT_INSN(),
5495 },
5496 .fixup_map2 = { 3 },
5497 .result = ACCEPT,
5498 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5499 },
5500 {
5501 "helper access to variable memory: map adjusted, JMP, wrong max",
5502 .insns = {
5503 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5504 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5505 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5506 BPF_LD_MAP_FD(BPF_REG_1, 0),
5507 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5508 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5509 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5511 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5512 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5513 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5514 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5515 sizeof(struct test_val) - 19, 4),
5516 BPF_MOV64_IMM(BPF_REG_4, 0),
5517 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5518 BPF_MOV64_IMM(BPF_REG_3, 0),
5519 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5520 BPF_MOV64_IMM(BPF_REG_0, 0),
5521 BPF_EXIT_INSN(),
5522 },
5523 .fixup_map2 = { 3 },
5524 .errstr = "R1 min value is outside of the array range",
5525 .result = REJECT,
5526 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5527 },
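/* bpf_csum_diff() accepts a NULL buffer only together with size 0; a NULL
 * buffer with a possibly non-zero size, or a valid stack pointer with size 0,
 * must be rejected.
 */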
5528 {
5529 "helper access to variable memory: size = 0 allowed on NULL",
5530 .insns = {
5531 BPF_MOV64_IMM(BPF_REG_1, 0),
5532 BPF_MOV64_IMM(BPF_REG_2, 0),
5533 BPF_MOV64_IMM(BPF_REG_3, 0),
5534 BPF_MOV64_IMM(BPF_REG_4, 0),
5535 BPF_MOV64_IMM(BPF_REG_5, 0),
5536 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5537 BPF_EXIT_INSN(),
5538 },
5539 .result = ACCEPT,
5540 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5541 },
5542 {
5543 "helper access to variable memory: size > 0 not allowed on NULL",
5544 .insns = {
5545 BPF_MOV64_IMM(BPF_REG_1, 0),
5546 BPF_MOV64_IMM(BPF_REG_2, 0),
5547 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5548 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5549 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5550 BPF_MOV64_IMM(BPF_REG_3, 0),
5551 BPF_MOV64_IMM(BPF_REG_4, 0),
5552 BPF_MOV64_IMM(BPF_REG_5, 0),
5553 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5554 BPF_EXIT_INSN(),
5555 },
5556 .errstr = "R1 type=inv expected=fp",
5557 .result = REJECT,
5558 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5559 },
5560 {
5561 "helper access to variable memory: size = 0 not allowed on != NULL",
5562 .insns = {
5563 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5565 BPF_MOV64_IMM(BPF_REG_2, 0),
5566 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5567 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5568 BPF_MOV64_IMM(BPF_REG_3, 0),
5569 BPF_MOV64_IMM(BPF_REG_4, 0),
5570 BPF_MOV64_IMM(BPF_REG_5, 0),
5571 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5572 BPF_EXIT_INSN(),
5573 },
5574 .errstr = "invalid stack type R1 off=-8 access_size=0",
5575 .result = REJECT,
5576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5577 },
5578 {
5579 "helper access to variable memory: 8 bytes leak",
5580 .insns = {
5581 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5582 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5583 BPF_MOV64_IMM(BPF_REG_0, 0),
5584 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5585 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5586 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5587 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5588 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5589 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5590 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5591 BPF_MOV64_IMM(BPF_REG_2, 0),
5592 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5594 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5596 BPF_MOV64_IMM(BPF_REG_3, 0),
5597 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5598 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5599 BPF_EXIT_INSN(),
5600 },
5601 .errstr = "invalid indirect read from stack off -64+32 size 64",
5602 .result = REJECT,
5603 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5604 },
5605 {
5606 "helper access to variable memory: 8 bytes no leak (init memory)",
5607 .insns = {
5608 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5609 BPF_MOV64_IMM(BPF_REG_0, 0),
5610 BPF_MOV64_IMM(BPF_REG_0, 0),
5611 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5612 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5613 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5614 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5615 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5616 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5617 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5618 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5620 BPF_MOV64_IMM(BPF_REG_2, 0),
5621 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5623 BPF_MOV64_IMM(BPF_REG_3, 0),
5624 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5625 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5626 BPF_EXIT_INSN(),
5627 },
5628 .result = ACCEPT,
5629 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5630 },
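/* Masking an index with a negative constant, or scaling it with MOD/MUL
 * tricks, gives the verifier no usable upper bound, so the resulting map
 * value offset is rejected as possibly outside the array range.
 */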
5631 {
5632 "invalid and of negative number",
5633 .insns = {
5634 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5637 BPF_LD_MAP_FD(BPF_REG_1, 0),
5638 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5639 BPF_FUNC_map_lookup_elem),
5640 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5641 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5642 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5643 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5644 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5645 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5646 offsetof(struct test_val, foo)),
5647 BPF_EXIT_INSN(),
5648 },
5649 .fixup_map2 = { 3 },
5650 .errstr = "R0 max value is outside of the array range",
5651 .result = REJECT,
5652 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5653 },
5654 {
5655 "invalid range check",
5656 .insns = {
5657 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5658 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5660 BPF_LD_MAP_FD(BPF_REG_1, 0),
5661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5662 BPF_FUNC_map_lookup_elem),
5663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5664 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5665 BPF_MOV64_IMM(BPF_REG_9, 1),
5666 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5667 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5668 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5669 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5670 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5671 BPF_MOV32_IMM(BPF_REG_3, 1),
5672 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5673 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5674 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5675 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5676 BPF_MOV64_REG(BPF_REG_0, 0),
5677 BPF_EXIT_INSN(),
5678 },
5679 .fixup_map2 = { 3 },
5680 .errstr = "R0 max value is outside of the array range",
5681 .result = REJECT,
5682 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5683 },
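/* Map-in-map: the value returned by the outer lookup may be used as the map
 * pointer for the inner lookup, but only untouched and only after a NULL
 * check.
 */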
5684 {
5685 "map in map access",
5686 .insns = {
5687 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5688 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5690 BPF_LD_MAP_FD(BPF_REG_1, 0),
5691 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5692 BPF_FUNC_map_lookup_elem),
5693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5694 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5697 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5699 BPF_FUNC_map_lookup_elem),
5700 BPF_MOV64_REG(BPF_REG_0, 0),
5701 BPF_EXIT_INSN(),
5702 },
5703 .fixup_map_in_map = { 3 },
5704 .result = ACCEPT,
5705 },
5706 {
5707 "invalid inner map pointer",
5708 .insns = {
5709 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5712 BPF_LD_MAP_FD(BPF_REG_1, 0),
5713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5714 BPF_FUNC_map_lookup_elem),
5715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5716 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5717 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5721 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5722 BPF_FUNC_map_lookup_elem),
5723 BPF_MOV64_REG(BPF_REG_0, 0),
5724 BPF_EXIT_INSN(),
5725 },
5726 .fixup_map_in_map = { 3 },
5727 .errstr = "R1 type=inv expected=map_ptr",
5728 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5729 .result = REJECT,
5730 },
5731 {
5732 "forgot null checking on the inner map pointer",
5733 .insns = {
5734 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5737 BPF_LD_MAP_FD(BPF_REG_1, 0),
5738 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5739 BPF_FUNC_map_lookup_elem),
5740 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5741 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5743 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5744 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5745 BPF_FUNC_map_lookup_elem),
5746 BPF_MOV64_REG(BPF_REG_0, 0),
5747 BPF_EXIT_INSN(),
5748 },
5749 .fixup_map_in_map = { 3 },
5750 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5751 .result = REJECT,
5752 },
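/* LD_ABS/LD_IND clobber the caller-saved registers R1-R5, so reading any of
 * them afterwards must fail with "!read_ok"; callee-saved R7 survives.
 */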
5753 {
5754 "ld_abs: check calling conv, r1",
5755 .insns = {
5756 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5757 BPF_MOV64_IMM(BPF_REG_1, 0),
5758 BPF_LD_ABS(BPF_W, -0x200000),
5759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5760 BPF_EXIT_INSN(),
5761 },
5762 .errstr = "R1 !read_ok",
5763 .result = REJECT,
5764 },
5765 {
5766 "ld_abs: check calling conv, r2",
5767 .insns = {
5768 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5769 BPF_MOV64_IMM(BPF_REG_2, 0),
5770 BPF_LD_ABS(BPF_W, -0x200000),
5771 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5772 BPF_EXIT_INSN(),
5773 },
5774 .errstr = "R2 !read_ok",
5775 .result = REJECT,
5776 },
5777 {
5778 "ld_abs: check calling conv, r3",
5779 .insns = {
5780 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5781 BPF_MOV64_IMM(BPF_REG_3, 0),
5782 BPF_LD_ABS(BPF_W, -0x200000),
5783 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5784 BPF_EXIT_INSN(),
5785 },
5786 .errstr = "R3 !read_ok",
5787 .result = REJECT,
5788 },
5789 {
5790 "ld_abs: check calling conv, r4",
5791 .insns = {
5792 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5793 BPF_MOV64_IMM(BPF_REG_4, 0),
5794 BPF_LD_ABS(BPF_W, -0x200000),
5795 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5796 BPF_EXIT_INSN(),
5797 },
5798 .errstr = "R4 !read_ok",
5799 .result = REJECT,
5800 },
5801 {
5802 "ld_abs: check calling conv, r5",
5803 .insns = {
5804 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5805 BPF_MOV64_IMM(BPF_REG_5, 0),
5806 BPF_LD_ABS(BPF_W, -0x200000),
5807 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5808 BPF_EXIT_INSN(),
5809 },
5810 .errstr = "R5 !read_ok",
5811 .result = REJECT,
5812 },
5813 {
5814 "ld_abs: check calling conv, r7",
5815 .insns = {
5816 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5817 BPF_MOV64_IMM(BPF_REG_7, 0),
5818 BPF_LD_ABS(BPF_W, -0x200000),
5819 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5820 BPF_EXIT_INSN(),
5821 },
5822 .result = ACCEPT,
5823 },
5824 {
5825 "ld_ind: check calling conv, r1",
5826 .insns = {
5827 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5828 BPF_MOV64_IMM(BPF_REG_1, 1),
5829 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5830 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5831 BPF_EXIT_INSN(),
5832 },
5833 .errstr = "R1 !read_ok",
5834 .result = REJECT,
5835 },
5836 {
5837 "ld_ind: check calling conv, r2",
5838 .insns = {
5839 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5840 BPF_MOV64_IMM(BPF_REG_2, 1),
5841 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5842 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5843 BPF_EXIT_INSN(),
5844 },
5845 .errstr = "R2 !read_ok",
5846 .result = REJECT,
5847 },
5848 {
5849 "ld_ind: check calling conv, r3",
5850 .insns = {
5851 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5852 BPF_MOV64_IMM(BPF_REG_3, 1),
5853 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5854 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5855 BPF_EXIT_INSN(),
5856 },
5857 .errstr = "R3 !read_ok",
5858 .result = REJECT,
5859 },
5860 {
5861 "ld_ind: check calling conv, r4",
5862 .insns = {
5863 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5864 BPF_MOV64_IMM(BPF_REG_4, 1),
5865 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5867 BPF_EXIT_INSN(),
5868 },
5869 .errstr = "R4 !read_ok",
5870 .result = REJECT,
5871 },
5872 {
5873 "ld_ind: check calling conv, r5",
5874 .insns = {
5875 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5876 BPF_MOV64_IMM(BPF_REG_5, 1),
5877 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5879 BPF_EXIT_INSN(),
5880 },
5881 .errstr = "R5 !read_ok",
5882 .result = REJECT,
5883 },
5884 {
5885 "ld_ind: check calling conv, r7",
5886 .insns = {
5887 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5888 BPF_MOV64_IMM(BPF_REG_7, 1),
5889 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5890 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5891 BPF_EXIT_INSN(),
5892 },
5893 .result = ACCEPT,
5894 },
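/* Narrow (byte/half/word) loads of bpf_perf_event_data->sample_period are
 * permitted; the offset is shifted on big-endian hosts so the same low bytes
 * of the 64-bit field are read.
 */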
5895 {
5896 "check bpf_perf_event_data->sample_period byte load permitted",
5897 .insns = {
5898 BPF_MOV64_IMM(BPF_REG_0, 0),
5899#if __BYTE_ORDER == __LITTLE_ENDIAN
5900 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5901 offsetof(struct bpf_perf_event_data, sample_period)),
5902#else
5903 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5904 offsetof(struct bpf_perf_event_data, sample_period) + 7),
5905#endif
5906 BPF_EXIT_INSN(),
5907 },
5908 .result = ACCEPT,
5909 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5910 },
5911 {
5912 "check bpf_perf_event_data->sample_period half load permitted",
5913 .insns = {
5914 BPF_MOV64_IMM(BPF_REG_0, 0),
5915#if __BYTE_ORDER == __LITTLE_ENDIAN
5916 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5917 offsetof(struct bpf_perf_event_data, sample_period)),
5918#else
5919 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5920 offsetof(struct bpf_perf_event_data, sample_period) + 6),
5921#endif
5922 BPF_EXIT_INSN(),
5923 },
5924 .result = ACCEPT,
5925 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5926 },
5927 {
5928 "check bpf_perf_event_data->sample_period word load permitted",
5929 .insns = {
5930 BPF_MOV64_IMM(BPF_REG_0, 0),
5931#if __BYTE_ORDER == __LITTLE_ENDIAN
5932 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5933 offsetof(struct bpf_perf_event_data, sample_period)),
5934#else
5935 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5936 offsetof(struct bpf_perf_event_data, sample_period) + 4),
5937#endif
5938 BPF_EXIT_INSN(),
5939 },
5940 .result = ACCEPT,
5941 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5942 },
5943 {
5944 "check bpf_perf_event_data->sample_period dword load permitted",
5945 .insns = {
5946 BPF_MOV64_IMM(BPF_REG_0, 0),
5947 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5948 offsetof(struct bpf_perf_event_data, sample_period)),
5949 BPF_EXIT_INSN(),
5950 },
5951 .result = ACCEPT,
5952 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5953 },
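/* In contrast, narrow loads of __sk_buff fields such as data and tc_classid
 * are not permitted here and must fail with "invalid bpf_context access".
 */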
5954 {
5955 "check skb->data half load not permitted",
5956 .insns = {
5957 BPF_MOV64_IMM(BPF_REG_0, 0),
5958#if __BYTE_ORDER == __LITTLE_ENDIAN
5959 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5960 offsetof(struct __sk_buff, data)),
5961#else
5962 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5963 offsetof(struct __sk_buff, data) + 2),
5964#endif
5965 BPF_EXIT_INSN(),
5966 },
5967 .result = REJECT,
5968 .errstr = "invalid bpf_context access",
5969 },
5970 {
5971 "check skb->tc_classid half load not permitted for lwt prog",
5972 .insns = {
5973 BPF_MOV64_IMM(BPF_REG_0, 0),
5974#if __BYTE_ORDER == __LITTLE_ENDIAN
5975 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5976 offsetof(struct __sk_buff, tc_classid)),
5977#else
5978 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5979 offsetof(struct __sk_buff, tc_classid) + 2),
5980#endif
5981 BPF_EXIT_INSN(),
5982 },
5983 .result = REJECT,
5984 .errstr = "invalid bpf_context access",
5985 .prog_type = BPF_PROG_TYPE_LWT_IN,
5986 },
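/* Mixing signed and unsigned comparisons on the same register is not a sound
 * bound: a value that passes the unsigned check can still be negative when
 * interpreted as signed, so the resulting map accesses are rejected.
 */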
5987 {
5988 "bounds checks mixing signed and unsigned, positive bounds",
5989 .insns = {
5990 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5992 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5993 BPF_LD_MAP_FD(BPF_REG_1, 0),
5994 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5995 BPF_FUNC_map_lookup_elem),
5996 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5997 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5998 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5999 BPF_MOV64_IMM(BPF_REG_2, 2),
6000 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6001 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6002 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6003 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6004 BPF_MOV64_IMM(BPF_REG_0, 0),
6005 BPF_EXIT_INSN(),
6006 },
6007 .fixup_map1 = { 3 },
Edward Creeb712296a2017-07-21 00:00:24 +02006008 .errstr = "R0 min value is negative",
6009 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02006010 },
6011 {
6012 "bounds checks mixing signed and unsigned",
6013 .insns = {
6014 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6015 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6016 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6017 BPF_LD_MAP_FD(BPF_REG_1, 0),
6018 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6019 BPF_FUNC_map_lookup_elem),
6020 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6021 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6022 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6023 BPF_MOV64_IMM(BPF_REG_2, -1),
6024 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6025 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6026 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6027 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6028 BPF_MOV64_IMM(BPF_REG_0, 0),
6029 BPF_EXIT_INSN(),
6030 },
6031 .fixup_map1 = { 3 },
Edward Creeb712296a2017-07-21 00:00:24 +02006032 .errstr = "R0 min value is negative",
6033 .result = REJECT,
Edward Creeb712296a2017-07-21 00:00:24 +02006034 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006035 {
6036 "bounds checks mixing signed and unsigned, variant 2",
6037 .insns = {
6038 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6039 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6041 BPF_LD_MAP_FD(BPF_REG_1, 0),
6042 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6043 BPF_FUNC_map_lookup_elem),
6044 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6045 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6046 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6047 BPF_MOV64_IMM(BPF_REG_2, -1),
6048 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6049 BPF_MOV64_IMM(BPF_REG_8, 0),
6050 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6051 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6052 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6053 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6054 BPF_MOV64_IMM(BPF_REG_0, 0),
6055 BPF_EXIT_INSN(),
6056 },
6057 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006058 .errstr = "R8 invalid mem access 'inv'",
6059 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006060 },
6061 {
6062 "bounds checks mixing signed and unsigned, variant 3",
6063 .insns = {
6064 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6067 BPF_LD_MAP_FD(BPF_REG_1, 0),
6068 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6069 BPF_FUNC_map_lookup_elem),
6070 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6071 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6072 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6073 BPF_MOV64_IMM(BPF_REG_2, -1),
6074 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6075 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6076 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6077 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6078 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6079 BPF_MOV64_IMM(BPF_REG_0, 0),
6080 BPF_EXIT_INSN(),
6081 },
6082 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006083 .errstr = "R8 invalid mem access 'inv'",
6084 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006085 },
6086 {
6087 "bounds checks mixing signed and unsigned, variant 4",
6088 .insns = {
6089 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6090 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6091 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6092 BPF_LD_MAP_FD(BPF_REG_1, 0),
6093 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6094 BPF_FUNC_map_lookup_elem),
6095 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6096 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6097 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6098 BPF_MOV64_IMM(BPF_REG_2, 1),
6099 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6100 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6101 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6102 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6103 BPF_MOV64_IMM(BPF_REG_0, 0),
6104 BPF_EXIT_INSN(),
6105 },
6106 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006107 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006108 },
6109 {
6110 "bounds checks mixing signed and unsigned, variant 5",
6111 .insns = {
6112 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6113 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6115 BPF_LD_MAP_FD(BPF_REG_1, 0),
6116 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6117 BPF_FUNC_map_lookup_elem),
6118 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6119 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6120 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6121 BPF_MOV64_IMM(BPF_REG_2, -1),
6122 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6123 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6124 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6125 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6126 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6127 BPF_MOV64_IMM(BPF_REG_0, 0),
6128 BPF_EXIT_INSN(),
6129 },
6130 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006131 .errstr = "R0 min value is negative",
Daniel Borkmann86412502017-07-21 00:00:25 +02006132 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006133 },
6134 {
6135 "bounds checks mixing signed and unsigned, variant 6",
6136 .insns = {
6137 BPF_MOV64_IMM(BPF_REG_2, 0),
6138 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6139 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6140 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6141 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6142 BPF_MOV64_IMM(BPF_REG_6, -1),
6143 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6144 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6145 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6146 BPF_MOV64_IMM(BPF_REG_5, 0),
6147 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6149 BPF_FUNC_skb_load_bytes),
6150 BPF_MOV64_IMM(BPF_REG_0, 0),
6151 BPF_EXIT_INSN(),
6152 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006153 .errstr = "R4 min value is negative, either use unsigned",
6154 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006155 },
6156 {
6157 "bounds checks mixing signed and unsigned, variant 7",
6158 .insns = {
6159 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6160 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6161 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6162 BPF_LD_MAP_FD(BPF_REG_1, 0),
6163 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6164 BPF_FUNC_map_lookup_elem),
6165 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6166 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6167 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6168 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6169 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6170 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6171 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6172 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6173 BPF_MOV64_IMM(BPF_REG_0, 0),
6174 BPF_EXIT_INSN(),
6175 },
6176 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006177 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006178 },
6179 {
6180 "bounds checks mixing signed and unsigned, variant 8",
6181 .insns = {
6182 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6183 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6185 BPF_LD_MAP_FD(BPF_REG_1, 0),
6186 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6187 BPF_FUNC_map_lookup_elem),
Daniel Borkmann86412502017-07-21 00:00:25 +02006188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6189 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6190 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6191 BPF_MOV64_IMM(BPF_REG_2, -1),
6192 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6193 BPF_MOV64_IMM(BPF_REG_0, 0),
6194 BPF_EXIT_INSN(),
6195 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6196 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6197 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6198 BPF_MOV64_IMM(BPF_REG_0, 0),
6199 BPF_EXIT_INSN(),
6200 },
6201 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006202 .errstr = "R0 min value is negative",
6203 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006204 },
6205 {
Edward Creef65b1842017-08-07 15:27:12 +01006206 "bounds checks mixing signed and unsigned, variant 9",
Daniel Borkmann86412502017-07-21 00:00:25 +02006207 .insns = {
6208 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6211 BPF_LD_MAP_FD(BPF_REG_1, 0),
6212 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6213 BPF_FUNC_map_lookup_elem),
6214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6215 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6216 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6217 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6218 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6219 BPF_MOV64_IMM(BPF_REG_0, 0),
6220 BPF_EXIT_INSN(),
6221 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6222 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6223 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6224 BPF_MOV64_IMM(BPF_REG_0, 0),
6225 BPF_EXIT_INSN(),
6226 },
6227 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006228 .result = ACCEPT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006229 },
6230 {
Edward Creef65b1842017-08-07 15:27:12 +01006231 "bounds checks mixing signed and unsigned, variant 10",
Daniel Borkmann86412502017-07-21 00:00:25 +02006232 .insns = {
6233 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6234 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6236 BPF_LD_MAP_FD(BPF_REG_1, 0),
6237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6238 BPF_FUNC_map_lookup_elem),
6239 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6240 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6241 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6242 BPF_MOV64_IMM(BPF_REG_2, 0),
6243 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6244 BPF_MOV64_IMM(BPF_REG_0, 0),
6245 BPF_EXIT_INSN(),
6246 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6247 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6248 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6249 BPF_MOV64_IMM(BPF_REG_0, 0),
6250 BPF_EXIT_INSN(),
6251 },
6252 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006253 .errstr = "R0 min value is negative",
6254 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006255 },
6256 {
Edward Creef65b1842017-08-07 15:27:12 +01006257 "bounds checks mixing signed and unsigned, variant 11",
Daniel Borkmann86412502017-07-21 00:00:25 +02006258 .insns = {
6259 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6260 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6262 BPF_LD_MAP_FD(BPF_REG_1, 0),
6263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6264 BPF_FUNC_map_lookup_elem),
6265 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6267 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6268 BPF_MOV64_IMM(BPF_REG_2, -1),
6269 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6270 /* Dead branch. */
6271 BPF_MOV64_IMM(BPF_REG_0, 0),
6272 BPF_EXIT_INSN(),
6273 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6274 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6275 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6276 BPF_MOV64_IMM(BPF_REG_0, 0),
6277 BPF_EXIT_INSN(),
6278 },
6279 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006280 .errstr = "R0 min value is negative",
6281 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006282 },
6283 {
Edward Creef65b1842017-08-07 15:27:12 +01006284 "bounds checks mixing signed and unsigned, variant 12",
Daniel Borkmann86412502017-07-21 00:00:25 +02006285 .insns = {
6286 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6287 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6289 BPF_LD_MAP_FD(BPF_REG_1, 0),
6290 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6291 BPF_FUNC_map_lookup_elem),
6292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6293 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6294 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6295 BPF_MOV64_IMM(BPF_REG_2, -6),
6296 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6297 BPF_MOV64_IMM(BPF_REG_0, 0),
6298 BPF_EXIT_INSN(),
6299 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6300 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6301 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6302 BPF_MOV64_IMM(BPF_REG_0, 0),
6303 BPF_EXIT_INSN(),
6304 },
6305 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006306 .errstr = "R0 min value is negative",
6307 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006308 },
6309 {
Edward Creef65b1842017-08-07 15:27:12 +01006310 "bounds checks mixing signed and unsigned, variant 13",
Daniel Borkmann86412502017-07-21 00:00:25 +02006311 .insns = {
6312 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6313 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6315 BPF_LD_MAP_FD(BPF_REG_1, 0),
6316 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6317 BPF_FUNC_map_lookup_elem),
6318 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6319 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6320 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6321 BPF_MOV64_IMM(BPF_REG_2, 2),
6322 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6323 BPF_MOV64_IMM(BPF_REG_7, 1),
6324 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6325 BPF_MOV64_IMM(BPF_REG_0, 0),
6326 BPF_EXIT_INSN(),
6327 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6328 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6330 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6331 BPF_MOV64_IMM(BPF_REG_0, 0),
6332 BPF_EXIT_INSN(),
6333 },
6334 .fixup_map1 = { 3 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006335 .errstr = "R0 min value is negative",
6336 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006337 },
6338 {
Edward Creef65b1842017-08-07 15:27:12 +01006339 "bounds checks mixing signed and unsigned, variant 14",
Daniel Borkmann86412502017-07-21 00:00:25 +02006340 .insns = {
6341 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6342 offsetof(struct __sk_buff, mark)),
6343 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6344 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6345 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6346 BPF_LD_MAP_FD(BPF_REG_1, 0),
6347 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6348 BPF_FUNC_map_lookup_elem),
6349 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6350 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6351 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6352 BPF_MOV64_IMM(BPF_REG_2, -1),
6353 BPF_MOV64_IMM(BPF_REG_8, 2),
6354 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6355 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6356 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6357 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6358 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6359 BPF_MOV64_IMM(BPF_REG_0, 0),
6360 BPF_EXIT_INSN(),
6361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6362 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6363 },
6364 .fixup_map1 = { 4 },
Daniel Borkmann86412502017-07-21 00:00:25 +02006365 .errstr = "R0 min value is negative",
6366 .result = REJECT,
Daniel Borkmann86412502017-07-21 00:00:25 +02006367 },
6368 {
Edward Creef65b1842017-08-07 15:27:12 +01006369 "bounds checks mixing signed and unsigned, variant 15",
Daniel Borkmann86412502017-07-21 00:00:25 +02006370 .insns = {
6371 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6372 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6374 BPF_LD_MAP_FD(BPF_REG_1, 0),
6375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6376 BPF_FUNC_map_lookup_elem),
6377 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6378 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6379 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6380 BPF_MOV64_IMM(BPF_REG_2, -6),
6381 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6382 BPF_MOV64_IMM(BPF_REG_0, 0),
6383 BPF_EXIT_INSN(),
6384 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6385 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6386 BPF_MOV64_IMM(BPF_REG_0, 0),
6387 BPF_EXIT_INSN(),
6388 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6389 BPF_MOV64_IMM(BPF_REG_0, 0),
6390 BPF_EXIT_INSN(),
6391 },
6392 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006393 .errstr_unpriv = "R0 pointer comparison prohibited",
Daniel Borkmann86412502017-07-21 00:00:25 +02006394 .errstr = "R0 min value is negative",
6395 .result = REJECT,
6396 .result_unpriv = REJECT,
6397 },
Edward Cree545722c2017-07-21 14:36:57 +01006398 {
Edward Creef65b1842017-08-07 15:27:12 +01006399 "subtraction bounds (map value) variant 1",
Edward Cree545722c2017-07-21 14:36:57 +01006400 .insns = {
6401 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6402 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6404 BPF_LD_MAP_FD(BPF_REG_1, 0),
6405 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6406 BPF_FUNC_map_lookup_elem),
6407 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6408 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6409 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6410 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6411 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6412 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6413 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6414 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6415 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6416 BPF_EXIT_INSN(),
6417 BPF_MOV64_IMM(BPF_REG_0, 0),
6418 BPF_EXIT_INSN(),
6419 },
6420 .fixup_map1 = { 3 },
Edward Creef65b1842017-08-07 15:27:12 +01006421 .errstr = "R0 max value is outside of the array range",
6422 .result = REJECT,
6423 },
6424 {
6425 "subtraction bounds (map value) variant 2",
6426 .insns = {
6427 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6428 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6429 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6430 BPF_LD_MAP_FD(BPF_REG_1, 0),
6431 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6432 BPF_FUNC_map_lookup_elem),
6433 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6434 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6435 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6436 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6437 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6438 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6439 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6440 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6441 BPF_EXIT_INSN(),
6442 BPF_MOV64_IMM(BPF_REG_0, 0),
6443 BPF_EXIT_INSN(),
6444 },
6445 .fixup_map1 = { 3 },
Edward Cree545722c2017-07-21 14:36:57 +01006446 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6447 .result = REJECT,
Edward Cree545722c2017-07-21 14:36:57 +01006448 },
Edward Cree69c4e8a2017-08-07 15:29:51 +01006449 {
6450 "variable-offset ctx access",
6451 .insns = {
6452 /* Get an unknown value */
6453 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6454 /* Make it small and 4-byte aligned */
6455 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6456 /* add it to skb. We now have either &skb->len or
6457 * &skb->pkt_type, but we don't know which
6458 */
6459 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6460 /* dereference it */
6461 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6462 BPF_EXIT_INSN(),
6463 },
6464 .errstr = "variable ctx access var_off=(0x0; 0x4)",
6465 .result = REJECT,
6466 .prog_type = BPF_PROG_TYPE_LWT_IN,
6467 },
6468 {
6469 "variable-offset stack access",
6470 .insns = {
6471 /* Fill the top 8 bytes of the stack */
6472 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6473 /* Get an unknown value */
6474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6475 /* Make it small and 4-byte aligned */
6476 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6477 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6478 /* add it to fp. We now have either fp-4 or fp-8, but
6479 * we don't know which
6480 */
6481 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6482 /* dereference it */
6483 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6484 BPF_EXIT_INSN(),
6485 },
6486 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6487 .result = REJECT,
6488 .prog_type = BPF_PROG_TYPE_LWT_IN,
6489 },
Edward Creed893dc22017-08-23 15:09:46 +01006490 {
6491 "liveness pruning and write screening",
6492 .insns = {
6493 /* Get an unknown value */
6494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6495 /* branch conditions teach us nothing about R2 */
6496 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6497 BPF_MOV64_IMM(BPF_REG_0, 0),
6498 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6499 BPF_MOV64_IMM(BPF_REG_0, 0),
6500 BPF_EXIT_INSN(),
6501 },
6502 .errstr = "R0 !read_ok",
6503 .result = REJECT,
6504 .prog_type = BPF_PROG_TYPE_LWT_IN,
6505 },
Alexei Starovoitovdf20cb72017-08-23 15:10:26 +01006506 {
6507 "varlen_map_value_access pruning",
6508 .insns = {
6509 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6510 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6511 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6512 BPF_LD_MAP_FD(BPF_REG_1, 0),
6513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6514 BPF_FUNC_map_lookup_elem),
6515 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6516 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6517 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
6518 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
6519 BPF_MOV32_IMM(BPF_REG_1, 0),
6520 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
6521 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6522 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
6523 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6524 offsetof(struct test_val, foo)),
6525 BPF_EXIT_INSN(),
6526 },
6527 .fixup_map2 = { 3 },
6528 .errstr_unpriv = "R0 leaks addr",
6529 .errstr = "R0 unbounded memory access",
6530 .result_unpriv = REJECT,
6531 .result = REJECT,
6532 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6533 },
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006534};
6535
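/* Determine the effective length of a test program: the insns[] array
 * is sized MAX_INSNS and zero-filled past the last real instruction,
 * so scan backwards for the last non-zero insn.
 */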
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006536static int probe_filter_length(const struct bpf_insn *fp)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006537{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006538 int len;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006539
6540 for (len = MAX_INSNS - 1; len > 0; --len)
6541 if (fp[len].code != 0 || fp[len].imm != 0)
6542 break;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006543 return len + 1;
6544}
6545
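/* Create a BPF_MAP_TYPE_HASH map with 8-byte (long long) keys and the
 * given value size and element count, using BPF_F_NO_PREALLOC.
 */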
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006546static int create_map(uint32_t size_value, uint32_t max_elem)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006547{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006548 int fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006549
Mickaël Salaünf4874d02017-02-10 00:21:43 +01006550 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006551 size_value, max_elem, BPF_F_NO_PREALLOC);
6552 if (fd < 0)
6553 printf("Failed to create hash map '%s'!\n", strerror(errno));
Alexei Starovoitovbf508872015-10-07 22:23:23 -07006554
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006555 return fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07006556}
6557
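/* Create a small BPF_MAP_TYPE_PROG_ARRAY (int key/value, 4 slots);
 * its fd gets patched into test programs via the fixup_prog offsets.
 */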
6558static int create_prog_array(void)
6559{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006560 int fd;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07006561
Mickaël Salaünf4874d02017-02-10 00:21:43 +01006562 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006563 sizeof(int), 4, 0);
6564 if (fd < 0)
6565 printf("Failed to create prog array '%s'!\n", strerror(errno));
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006566
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006567 return fd;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006568}
6569
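/* Create a one-element BPF_MAP_TYPE_ARRAY_OF_MAPS whose inner map is a
 * one-element int array; the inner fd is only needed to size the outer
 * map and is closed before returning the outer map's fd.
 */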
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006570static int create_map_in_map(void)
6571{
6572 int inner_map_fd, outer_map_fd;
6573
6574 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
6575 sizeof(int), 1, 0);
6576 if (inner_map_fd < 0) {
6577 printf("Failed to create array '%s'!\n", strerror(errno));
6578 return inner_map_fd;
6579 }
6580
6581 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
6582 sizeof(int), inner_map_fd, 1, 0);
6583 if (outer_map_fd < 0)
6584 printf("Failed to create array of maps '%s'!\n",
6585 strerror(errno));
6586
6587 close(inner_map_fd);
6588
6589 return outer_map_fd;
6590}
6591
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006592static char bpf_vlog[32768];
6593
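/* Patch freshly created map fds into the test program. Each fixup
 * array lists instruction indices whose imm field is overwritten with
 * the fd of the corresponding map; e.g. a test with .fixup_map1 = { 3 }
 * gets the hash map fd written into insn 3 (typically the
 * BPF_LD_MAP_FD instruction of that test).
 */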
6594static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006595 int *map_fds)
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006596{
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006597 int *fixup_map1 = test->fixup_map1;
6598 int *fixup_map2 = test->fixup_map2;
6599 int *fixup_prog = test->fixup_prog;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006600 int *fixup_map_in_map = test->fixup_map_in_map;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006601
6602 /* Allocating HTs with 1 elem is fine here, since we only test
6603 * for verifier and not do a runtime lookup, so the only thing
6604 * that really matters is value size in this case.
6605 */
6606 if (*fixup_map1) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006607 map_fds[0] = create_map(sizeof(long long), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006608 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006609 prog[*fixup_map1].imm = map_fds[0];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006610 fixup_map1++;
6611 } while (*fixup_map1);
6612 }
6613
6614 if (*fixup_map2) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006615 map_fds[1] = create_map(sizeof(struct test_val), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006616 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006617 prog[*fixup_map2].imm = map_fds[1];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006618 fixup_map2++;
6619 } while (*fixup_map2);
6620 }
6621
6622 if (*fixup_prog) {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006623 map_fds[2] = create_prog_array();
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006624 do {
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006625 prog[*fixup_prog].imm = map_fds[2];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006626 fixup_prog++;
6627 } while (*fixup_prog);
6628 }
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006629
6630 if (*fixup_map_in_map) {
6631 map_fds[3] = create_map_in_map();
6632 do {
6633 prog[*fixup_map_in_map].imm = map_fds[3];
6634 fixup_map_in_map++;
6635 } while (*fixup_map_in_map);
6636 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006637}
6638
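/* Load one test program via bpf_verify_program() and compare the
 * outcome against the expected result and verifier error string (the
 * *_unpriv expectations take precedence when running unprivileged).
 * For tests flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, a rejection
 * caused by "Unknown alignment." is tolerated on architectures that
 * lack efficient unaligned access.
 */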
6639static void do_test_single(struct bpf_test *test, bool unpriv,
6640 int *passes, int *errors)
6641{
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006642 int fd_prog, expected_ret, reject_from_alignment;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006643 struct bpf_insn *prog = test->insns;
6644 int prog_len = probe_filter_length(prog);
6645 int prog_type = test->prog_type;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006646 int map_fds[MAX_NR_MAPS];
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006647 const char *expected_err;
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006648 int i;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006649
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006650 for (i = 0; i < MAX_NR_MAPS; i++)
6651 map_fds[i] = -1;
6652
6653 do_test_fixup(test, prog, map_fds);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006654
Daniel Borkmann614d0d72017-05-25 01:05:09 +02006655 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
6656 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
Daniel Borkmannd6554902017-07-21 00:00:22 +02006657 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006658
6659 expected_ret = unpriv && test->result_unpriv != UNDEF ?
6660 test->result_unpriv : test->result;
6661 expected_err = unpriv && test->errstr_unpriv ?
6662 test->errstr_unpriv : test->errstr;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006663
6664 reject_from_alignment = fd_prog < 0 &&
6665 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
6666 strstr(bpf_vlog, "Unknown alignment.");
6667#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
6668 if (reject_from_alignment) {
6669 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6670 strerror(errno));
6671 goto fail_log;
6672 }
6673#endif
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006674 if (expected_ret == ACCEPT) {
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006675 if (fd_prog < 0 && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006676 printf("FAIL\nFailed to load prog '%s'!\n",
6677 strerror(errno));
6678 goto fail_log;
6679 }
6680 } else {
6681 if (fd_prog >= 0) {
6682			printf("FAIL\nUnexpected success loading program!\n");
6683 goto fail_log;
6684 }
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006685 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006686 printf("FAIL\nUnexpected error message!\n");
6687 goto fail_log;
6688 }
6689 }
6690
6691 (*passes)++;
Daniel Borkmann02ea80b2017-03-31 02:24:04 +02006692 printf("OK%s\n", reject_from_alignment ?
6693 " (NOTE: reject due to unknown alignment)" : "");
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006694close_fds:
6695 close(fd_prog);
Martin KaFai Laufb30d4b2017-03-22 10:00:35 -07006696 for (i = 0; i < MAX_NR_MAPS; i++)
6697 close(map_fds[i]);
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006698 sched_yield();
6699 return;
6700fail_log:
6701 (*errors)++;
6702 printf("%s", bpf_vlog);
6703 goto close_fds;
6704}
6705
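/* Return true if CAP_SYS_ADMIN is in the effective capability set of
 * the current process; this decides whether the privileged test pass
 * can be run.
 */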
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006706static bool is_admin(void)
6707{
6708 cap_t caps;
6709 cap_flag_value_t sysadmin = CAP_CLEAR;
6710 const cap_value_t cap_val = CAP_SYS_ADMIN;
6711
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08006712#ifdef CAP_IS_SUPPORTED
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006713 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
6714		perror("CAP_IS_SUPPORTED");
6715 return false;
6716 }
Alexei Starovoitov1da8ac72017-03-10 22:05:55 -08006717#endif
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006718 caps = cap_get_proc();
6719 if (!caps) {
6720 perror("cap_get_proc");
6721 return false;
6722 }
6723 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
6724 perror("cap_get_flag");
6725 if (cap_free(caps))
6726 perror("cap_free");
6727 return (sysadmin == CAP_SET);
6728}
6729
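/* Raise or drop CAP_SYS_ADMIN in the effective set so that the
 * unprivileged test pass can be exercised even when running as root.
 */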
6730static int set_admin(bool admin)
6731{
6732 cap_t caps;
6733 const cap_value_t cap_val = CAP_SYS_ADMIN;
6734 int ret = -1;
6735
6736 caps = cap_get_proc();
6737 if (!caps) {
6738 perror("cap_get_proc");
6739 return -1;
6740 }
6741 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
6742 admin ? CAP_SET : CAP_CLEAR)) {
6743 perror("cap_set_flag");
6744 goto out;
6745 }
6746 if (cap_set_proc(caps)) {
6747 perror("cap_set_proc");
6748 goto out;
6749 }
6750 ret = 0;
6751out:
6752 if (cap_free(caps))
6753 perror("cap_free");
6754 return ret;
6755}
6756
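/* Run tests [from, to): tests using the default program type get an
 * unprivileged pass ("#N/u", with CAP_SYS_ADMIN temporarily dropped
 * when running as root); when privileged, every test additionally gets
 * a privileged pass ("#N/p").
 */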
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006757static int do_test(bool unpriv, unsigned int from, unsigned int to)
6758{
6759 int i, passes = 0, errors = 0;
6760
6761 for (i = from; i < to; i++) {
6762 struct bpf_test *test = &tests[i];
6763
6764		/* Program types that are not supported by non-root are
6765		 * skipped for the unprivileged pass right away.
6766		 */
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006767 if (!test->prog_type) {
6768 if (!unpriv)
6769 set_admin(false);
6770 printf("#%d/u %s ", i, test->descr);
6771 do_test_single(test, true, &passes, &errors);
6772 if (!unpriv)
6773 set_admin(true);
6774 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006775
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006776 if (!unpriv) {
6777 printf("#%d/p %s ", i, test->descr);
6778 do_test_single(test, false, &passes, &errors);
6779 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006780 }
6781
6782 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
Jesper Dangaard Brouerefe5f9c2017-06-13 15:17:19 +02006783 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006784}
6785
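/* With no arguments all tests run; one numeric argument selects a
 * single test and two arguments select an inclusive range, e.g.
 * (assuming the binary is built as test_verifier):
 *
 *   ./test_verifier        # run all tests
 *   ./test_verifier 7      # run only test #7
 *   ./test_verifier 5 10   # run tests #5 through #10
 *
 * RLIMIT_MEMLOCK is raised first (to infinity when privileged, 1 MiB
 * otherwise) so that map creation is not capped by the default
 * locked-memory limit.
 */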
6786int main(int argc, char **argv)
6787{
6788 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
6789 struct rlimit rlim = { 1 << 20, 1 << 20 };
6790 unsigned int from = 0, to = ARRAY_SIZE(tests);
Mickaël Salaünd02d8982017-02-10 00:21:37 +01006791 bool unpriv = !is_admin();
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006792
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006793 if (argc == 3) {
6794 unsigned int l = atoi(argv[argc - 2]);
6795 unsigned int u = atoi(argv[argc - 1]);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006796
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006797 if (l < to && u < to) {
6798 from = l;
6799 to = u + 1;
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006800 }
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006801 } else if (argc == 2) {
6802 unsigned int t = atoi(argv[argc - 1]);
Alexei Starovoitovbf508872015-10-07 22:23:23 -07006803
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006804 if (t < to) {
6805 from = t;
6806 to = t + 1;
Alexei Starovoitovbf508872015-10-07 22:23:23 -07006807 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006808 }
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006809
Daniel Borkmann5aa5bd12016-10-17 14:28:36 +02006810 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
6811 return do_test(unpriv, from, to);
Alexei Starovoitov3c731eb2014-09-26 00:17:07 -07006812}