/*
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <sys/capability.h>
#include <sys/resource.h>
#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
#define MAX_INSNS 512
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
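/* Per-test flags: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose
 * expected result assumes the architecture handles unaligned accesses
 * efficiently; F_LOAD_WITH_STRICT_ALIGNMENT loads the test program with
 * strict alignment checking enabled.
 */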
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
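/* One verifier test case: the instruction sequence to load, the expected
 * verdict (and verifier log substring) for privileged and unprivileged
 * loads, and the indexes of BPF_LD_MAP_FD instructions whose immediates
 * get patched with the fds of maps created by the test runner.
 */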
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
int fixup_map1[MAX_FIXUPS];
int fixup_map2[MAX_FIXUPS];
int fixup_prog[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
enum {
UNDEF,
ACCEPT,
REJECT
} result, result_unpriv;
enum bpf_prog_type prog_type;
uint8_t flags;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
* actually the end of the structure.
*/
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
static struct bpf_test tests[] = {
{
"add+sub+mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_2, 3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unreachable",
.insns = {
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"unreachable2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"out of range jump",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"out of range jump2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -2),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"test1 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test2 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"test3 ld_imm64",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 0),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test4 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test5 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test6 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"test7 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"test8 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "uses reserved fields",
.result = REJECT,
},
{
"test9 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, 0, 1, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test10 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test11 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"test12 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "not pointing to valid bpf_map",
.result = REJECT,
},
{
"test13 ld_imm64",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"arsh32 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "BPF_ARSH not supported for 32 bit ALU",
},
{
"arsh32 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "BPF_ARSH not supported for 32 bit ALU",
},
{
"arsh64 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"arsh64 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"loop (back-edge)",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"loop2 (back-edge)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"read uninitialized register",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R2 !read_ok",
.result = REJECT,
},
{
"read invalid register",
.insns = {
BPF_MOV64_REG(BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"program doesn't init R0 before exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
{
"program doesn't init R0 before exit in all branches",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"stack out of bounds",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack",
.result = REJECT,
},
{
"invalid call insn1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid call insn2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid function call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
"uninitialized stack1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 2 },
.errstr = "invalid indirect read from stack",
.result = REJECT,
},
{
"uninitialized stack2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.errstr = "invalid read from stack",
.result = REJECT,
},
{
"invalid fp arithmetic",
/* If this ever gets changed, make sure JITs can deal with it. */
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 subtraction from stack pointer",
.result_unpriv = REJECT,
.errstr = "R1 invalid mem access",
.result = REJECT,
},
{
"non-invalid fp arithmetic",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"non-invalid argument register",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* fill it back into R2 */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
/* should be able to access R0 = *(R2 + 8) */
/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
},
{
"check valid spill/fill, skb mark",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = ACCEPT,
},
{
"check corrupted spill/fill",
.insns = {
/* spill R1(ctx) into stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* mess up the spilled R1 pointer on the stack */
BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
/* filling it back into R0 should fail */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.errstr = "corrupted spill",
.result = REJECT,
},
{
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"invalid dst register in STX",
.insns = {
BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid dst register in ST",
.insns = {
BPF_ST_MEM(BPF_B, 14, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid src register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
BPF_EXIT_INSN(),
},
.errstr = "R12 is invalid",
.result = REJECT,
},
{
"invalid dst register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R11 is invalid",
.result = REJECT,
},
{
"junk insn",
.insns = {
BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM",
.result = REJECT,
},
{
"junk insn2",
.insns = {
BPF_RAW_INSN(1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_LDX uses reserved fields",
.result = REJECT,
},
{
"junk insn3",
.insns = {
BPF_RAW_INSN(-1, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn4",
.insns = {
BPF_RAW_INSN(-1, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_ALU opcode f0",
.result = REJECT,
},
{
"junk insn5",
.insns = {
BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ALU uses reserved fields",
.result = REJECT,
},
{
"misaligned read from stack",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
BPF_EXIT_INSN(),
},
.errstr = "misaligned stack access",
.result = REJECT,
},
{
"invalid map_fd for function call",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.errstr = "fd 0 is not pointing to valid bpf_map",
.result = REJECT,
},
{
"don't check return value before access",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access 'map_value_or_null'",
.result = REJECT,
},
{
"access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "misaligned value access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"sometimes access memory with incorrect alignment",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 invalid mem access",
.errstr_unpriv = "R0 leaks addr",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"jump test 1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 14),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 5),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 3",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 19),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_JMP_IMM(BPF_JA, 0, 0, 15),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
BPF_JMP_IMM(BPF_JA, 0, 0, 11),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
BPF_JMP_IMM(BPF_JA, 0, 0, 7),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
BPF_JMP_IMM(BPF_JA, 0, 0, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 24 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 4",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"jump test 5",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"access skb fields ok",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, queue_mapping)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, protocol)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_present)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, napi_id)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"access skb fields bad1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"access skb fields bad2",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad3",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -12),
},
.fixup_map1 = { 6 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"access skb fields bad4",
.insns = {
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -13),
},
.fixup_map1 = { 7 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
},
{
"invalid access __sk_buff family",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, family)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip4)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff local_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip4)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff local_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_port)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"invalid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_port)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"valid access __sk_buff family",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, family)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff local_ip4",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff local_ip6",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, remote_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access __sk_buff remote_port",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, local_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"invalid access of tc_classid for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"invalid access of skb->mark for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"check skb->mark is not writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
.errstr = "invalid bpf_context access",
},
{
"check skb->tc_index is writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"check skb->priority is writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, priority)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"direct packet read for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"direct packet write for SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"overlapping checks for direct packet access SK_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"check skb->mark is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check skb->tc_index is not writeable by sockets",
.insns = {
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "R1 leaks addr",
.result = REJECT,
},
{
"check cb access: byte",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"__sk_buff->hash, offset 0, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"__sk_buff->tc_index, offset 3, byte store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 3),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash byte load not permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load not permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash byte load not permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 3),
#else
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: byte, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: half",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3]) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: half, unaligned",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check __sk_buff->hash, offset 0, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, hash)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->tc_index, offset 2, half store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index) + 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check skb->hash half load permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#endif
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check skb->hash half load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
#else
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
#endif
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: half, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check cb access: word",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: word, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 1),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 2),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: word, unaligned 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4]) + 3),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check cb access: double, unaligned 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[1])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double, unaligned 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr = "misaligned context access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"check cb access: double, oob 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, oob 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw store not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check __sk_buff->ifindex dw load not permitted",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, ifindex)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"check cb access: double, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check out of range skb->cb access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0]) + 256),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.errstr_unpriv = "",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
{
"write skb fields from socket prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
},
{
"write skb fields from tc_cls_act prog",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "",
.result_unpriv = REJECT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"PTR_TO_STACK store/load",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"PTR_TO_STACK store/load - bad alignment on off",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
},
{
"PTR_TO_STACK store/load - out of bounds low",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=-79992 size=8",
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
"PTR_TO_STACK store/load - out of bounds high",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off=0 size=8",
},
{
"unpriv: return pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr",
},
{
"unpriv: add const to pointer",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unpriv: add pointer to pointer",
.insns = {
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer += pointer",
},
{
"unpriv: neg pointer",
.insns = {
BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer arithmetic",
},
{
"unpriv: cmp pointer with const",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 pointer comparison",
},
{
"unpriv: cmp pointer with pointer",
.insns = {
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R10 pointer comparison",
},
{
"unpriv: check that printk is disallowed",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_trace_printk),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "unknown func bpf_trace_printk#6",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to helper function",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_update_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R4 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: indirectly pass pointer on stack to helper function",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
},
{
"unpriv: mangle pointer on stack 1",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: mangle pointer on stack 2",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: read pointer from stack in small chunks",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid size",
.result = REJECT,
},
{
"unpriv: write pointer into ctx",
.insns = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 leaks addr",
.result_unpriv = REJECT,
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
"unpriv: spill/fill of ctx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"unpriv: spill/fill of ctx 2",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 3",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=fp expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of ctx 4",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
BPF_REG_0, -8, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_hash_recalc),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of different pointers stx",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 42),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
offsetof(struct __sk_buff, mark)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"unpriv: spill/fill of different pointers ldx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
-(__s32)offsetof(struct bpf_perf_event_data,
sample_period) - 8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
offsetof(struct bpf_perf_event_data,
sample_period)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "same insn cannot be used with different pointers",
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"unpriv: write pointer into map elem value",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: partial copy of pointer",
.insns = {
BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 partial copy",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: pass pointer to tail_call",
.insns = {
BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.errstr_unpriv = "R3 leaks addr into helper",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: cmp map pointer with zero",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 1 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: write into frame pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
{
"unpriv: spill/fill frame pointer",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "frame pointer is read only",
.result = REJECT,
},
{
"unpriv: cmp of frame pointer",
.insns = {
BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R10 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"unpriv: adding of fp",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
"unpriv: cmp of stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R2 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
"runtime/jit: pass negative index to tail_call",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, -1),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 1 },
.result = ACCEPT,
},
{
"runtime/jit: pass > 32bit index to tail_call",
.insns = {
BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_tail_call),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_prog = { 2 },
.result = ACCEPT,
},
{
"stack pointer arithmetic",
.insns = {
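/* Pointers derived from R10 may be moved around within the stack:
 * the two stores land at fp-12 and fp-8, well within bounds.
 * Size 0 in BPF_ST_MEM is BPF_W, i.e. a 4-byte store.
 */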
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
BPF_ST_MEM(0, BPF_REG_2, 4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_ST_MEM(0, BPF_REG_2, 4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"raw_stack: no skb_load_bytes",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
/* Call to skb_load_bytes() omitted. */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -8+0 size 8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R4 min value is negative",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, negative len 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, ~0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R4 min value is negative",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, zero len",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, no init",
.insns = {
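/* skb_load_bytes() writes into the stack buffer, so the slot does not
 * need to be initialized before the call.
 */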
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs around bounds",
.insns = {
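/* The helper clobbers only fp-16..fp-9; the skb pointers spilled at
 * fp-24 and fp-8 survive and may still be dereferenced afterwards.
 */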
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption",
.insns = {
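/* Here the helper overwrites the skb pointer spilled at fp-8, so the
 * fill yields an unknown scalar and the later dereference is rejected.
 */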
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption 2",
.insns = {
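/* Three spills; the one at fp-16 is overwritten by the helper, so
 * dereferencing R3 afterwards is rejected.
 */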
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
offsetof(struct __sk_buff, pkt_type)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, spilled regs + data",
.insns = {
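/* Same layout as the previous test, but the clobbered slot at fp-16
 * is only used as scalar data, never dereferenced, so this passes.
 */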
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
offsetof(struct __sk_buff, priority)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-513 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-1 access_size=8",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R4 min value is negative",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, invalid access 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack type R3 off=-512 access_size=0",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"raw_stack: skb_load_bytes, large access",
.insns = {
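/* fp-512 with a 512-byte length spans the entire BPF stack, which is
 * still within bounds.
 */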
BPF_MOV64_IMM(BPF_REG_2, 4),
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
BPF_MOV64_IMM(BPF_REG_4, 512),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_skb_load_bytes),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"context stores via ST",
.insns = {
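/* Immediate stores (BPF_ST) into the context are never allowed, even
 * to fields that are writable via BPF_STX.
 */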
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ST stores into R1 context is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"context stores via XADD",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_XADD stores into R1 context is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test1",
.insns = {
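/* Canonical direct packet access pattern: bail out unless
 * data + 8 <= data_end, then load a single byte.
 */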
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test3",
.insns = {
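/* Socket filters have no direct packet access, so even reading
 * skb->data (context offset 76) is rejected.
 */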
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access off=76",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
"direct packet access: test4 (write)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test5 (pkt_end >= reg, good access)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test6 (pkt_end >= reg, bad access)",
.insns = {
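/* The load sits on the path where data_end >= data + 8 does NOT hold,
 * so it would read past the packet.
 */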
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test7 (pkt_end >= reg, both accesses)",
.insns = {
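/* One load on the checked path, one on the unchecked path; the
 * unchecked one makes the program invalid.
 */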
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test8 (double test, variant 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test10 (write invalid)",
.insns = {
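/* The branch is inverted: the write is only reached when
 * data + 8 > data_end, i.e. past the end of the packet.
 */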
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid access to packet"