#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

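/*
 * cgroup_bpf_enabled is a static branch, bumped when cgroup BPF programs
 * are attached, so the BPF_CGROUP_RUN_PROG_*() wrappers below reduce to a
 * jump label check while no programs are in use.
 */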
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
};

struct bpf_prog_array;

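/*
 * Illustrative sketch (not normative) of how the fields below relate:
 * progs[] holds what was attached directly to this cgroup, while
 * effective[] holds what actually runs for it, derived from the whole
 * ancestor chain. With BPF_F_ALLOW_MULTI, ancestors' programs run in
 * addition to the cgroup's own; e.g. if / has prog A attached and /foo
 * has B and C, the effective array of /foo contains A, B and C (in the
 * order built by compute_effective_progs() in kernel/bpf/cgroup.c).
 * With flags == 0 or BPF_F_ALLOW_OVERRIDE, a descendant's program
 * replaces the inherited one instead.
 */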
struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* progs attached to this cgroup and their attach flags;
         * when flags is 0 or BPF_F_ALLOW_OVERRIDE, the progs list has
         * either zero or one element,
         * when BPF_F_ALLOW_MULTI, the list can have up to
         * BPF_CGROUP_MAX_PROGS entries
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array __rcu *inactive;
};

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);
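
/*
 * These wrappers are reached from the bpf(2) syscall (BPF_PROG_ATTACH,
 * BPF_PROG_DETACH, BPF_PROG_QUERY). A minimal userspace sketch, assuming
 * cgroup_fd refers to an open cgroup v2 directory and prog_fd to a loaded
 * program of a matching type:
 *
 *      union bpf_attr attr = {};
 *
 *      attr.target_fd     = cgroup_fd;
 *      attr.attach_bpf_fd = prog_fd;
 *      attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *      attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *      err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */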

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_*(), guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)                             \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled)                                               \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb,                  \
                                                    BPF_CGROUP_INET_INGRESS); \
                                                                              \
        __ret;                                                                \
})
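
/*
 * These wrappers evaluate to 0 when the operation may proceed and to a
 * negative error (-EPERM when an attached program rejects the skb). A
 * typical call site, sketched here rather than quoted from a particular
 * caller, looks like:
 *
 *      err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *      if (err)
 *              return err;
 */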

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)                              \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) {                      \
                typeof(sk) __sk = sk_to_full_sk(sk);                          \
                if (sk_fullsock(__sk))                                        \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb,        \
                                                     BPF_CGROUP_INET_EGRESS); \
        }                                                                     \
        __ret;                                                                \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                     \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled && sk) {                                       \
                __ret = __cgroup_bpf_run_filter_sk(sk,                        \
                                                BPF_CGROUP_INET_SOCK_CREATE); \
        }                                                                     \
        __ret;                                                                \
})

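/*
 * sock_ops->sk is not necessarily a full socket (it may be a request
 * socket, for instance), so the wrapper below first maps it to the
 * corresponding full socket with sk_to_full_sk() and only runs the
 * attached programs when sk_fullsock() is true, mirroring the
 * INET_EGRESS wrapper above.
 */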
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)                                \
({                                                                            \
        int __ret = 0;                                                        \
        if (cgroup_bpf_enabled && (sock_ops)->sk) {                           \
                struct sock *__sk = sk_to_full_sk((sock_ops)->sk);            \
                if (__sk && sk_fullsock(__sk))                                \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk,        \
                                                                 sock_ops,    \
                                                        BPF_CGROUP_SOCK_OPS); \
        }                                                                     \
        __ret;                                                                \
})
#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */