/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
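
/* Illustrative sketch (hypothetical classifier, not part of this header):
 * a classifier's ->walk() callback honours ->skip/->count and sets ->stop
 * when ->fn returns a negative value:
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				bool rtnl_held)
 *	{
 *		struct my_cls_filter *f;
 *
 *		list_for_each_entry(f, &my_cls_head(tp)->filters, link) {
 *			if (arg->count < arg->skip)
 *				goto skip;
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *	skip:
 *			arg->count++;
 *		}
 *	}
 */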

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

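/* Illustrative sketch (hypothetical module, assumed names): a classifier
 * registers its ops table at module init and removes it on exit:
 *
 *	static struct tcf_proto_ops cls_my_ops __read_mostly = {
 *		.kind		= "mycls",
 *		.classify	= my_cls_classify,
 *		.init		= my_cls_init,
 *		.destroy	= my_cls_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_mycls(void)
 *	{
 *		return register_tcf_proto_ops(&cls_my_ops);
 *	}
 *
 *	static void __exit exit_mycls(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_my_ops);
 *	}
 */
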
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

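/* Illustrative sketch (hypothetical filter type, assumed names): classifiers
 * typically embed a struct rcu_work in each filter and call tcf_queue_work()
 * on deletion so the filter is freed from process context after an RCU grace
 * period:
 *
 *	static void my_filter_free_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *
 *		tcf_exts_destroy(&f->exts);
 *		kfree(f);
 *	}
 *
 *	tcf_queue_work(&f->rwork, my_filter_free_work);
 */
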
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

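/* Illustrative sketch (hypothetical classful qdisc, assumed names): a qdisc
 * obtains its filter block in ->init() and releases it in ->destroy():
 *
 *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct my_qdisc_priv *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void my_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct my_qdisc_priv *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
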
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

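/* Illustrative sketch (hypothetical driver, assumed helper names): on a
 * TC_BLOCK_BIND event a driver registers a per-block callback that is then
 * invoked for every filter offload request on that block:
 *
 *	static int my_setup_block_cb(enum tc_setup_type type, void *type_data,
 *				     void *cb_priv)
 *	{
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return my_setup_flower(cb_priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	err = tcf_block_cb_register(f->block, my_setup_block_cb,
 *				    priv, priv, f->extack);
 */
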
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

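/* Illustrative sketch (qdisc enqueue path, assumed names): qdiscs classify
 * packets by handing them to the attached filter chain and mapping the
 * verdict to a class:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	if (result >= 0)
 *		cl = my_find_class(q, res.classid);
 */
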
#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

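/* Illustrative sketch (hypothetical classifier ->change() path, assumed
 * TCA_MYCLS_* attributes): exts are initialized before parsing and torn
 * down again on error:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		goto errout;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0)
 *		goto errout_exts;
 *	...
 *	errout_exts:
 *		tcf_exts_destroy(&f->exts);
 */
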
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

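/* Illustrative sketch (common teardown pattern): the netns reference decides
 * between deferred and synchronous cleanup of a filter:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_filter_free_work);
 *	else
 *		my_filter_free_now(f);
 *
 * The deferred work handler drops the reference again with
 * tcf_exts_put_net() before freeing the filter.
 */
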
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

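/* Illustrative usage sketch: iterate the actions attached to a filter,
 * e.g. to check for a drop action (is_tcf_gact_shot() is provided by
 * <net/tc_act/tc_gact.h>):
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return -EOPNOTSUPP;
 *	}
 */
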
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

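/* Illustrative sketch (hypothetical driver, assumed helper): hardware
 * counters gathered for an offloaded filter are propagated back to the
 * software actions on a *_STATS offload command:
 *
 *	u64 bytes, packets, lastuse;
 *
 *	my_hw_query_counters(priv, cls->cookie, &bytes, &packets, &lastuse);
 *	tcf_exts_stats_update(cls->exts, bytes, packets, lastuse);
 */
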
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

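/* Illustrative sketch (hypothetical classifier ->classify(), assumed names):
 * on the first matching filter, the stored result is returned and its
 * actions are run:
 *
 *	list_for_each_entry(f, &head->filters, link) {
 *		if (!my_filter_match(f, skb))
 *			continue;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 *	return -1;
 */
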
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

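/* Illustrative sketch (pattern used by ematch-aware classifiers such as
 * cls_basic): each filter carries an ematch tree that is evaluated as part
 * of the match before the filter's actions run:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 *	*res = f->res;
 *	return tcf_exts_exec(skb, &f->exts, res);
 */
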
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

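/* Illustrative sketch: code that dereferences packet data first validates
 * that the span it wants lies inside the linear skb area:
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *	... it is now safe to read len bytes at ptr ...
 */
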
#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
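
/* Illustrative sketch (hypothetical classifier, CONFIG_NET_CLS_IND only,
 * assumed TCA_MYCLS_INDEV attribute): a filter resolves the ingress device
 * once at configuration time and compares the ifindex per packet:
 *
 *	f->ifindex = tcf_change_indev(net, tb[TCA_MYCLS_INDEV], extack);
 *	if (f->ifindex < 0)
 *		return f->ifindex;
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */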
628
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +0100629int tc_setup_flow_action(struct flow_action *flow_action,
630 const struct tcf_exts *exts);
Cong Wangaeb3fec2018-12-11 11:15:46 -0800631int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
632 void *type_data, bool err_stop);
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +0100633unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
Jiri Pirko717503b2017-10-11 09:41:09 +0200634
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200635enum tc_block_command {
636 TC_BLOCK_BIND,
637 TC_BLOCK_UNBIND,
638};
639
640struct tc_block_offload {
641 enum tc_block_command command;
642 enum tcf_block_binder_type binder_type;
643 struct tcf_block *block;
John Hurley60513bd2018-06-25 14:30:04 -0700644 struct netlink_ext_ack *extack;
Jiri Pirko8c4083b2017-10-19 15:50:29 +0200645};
646
Jiri Pirko5fd9fc42017-08-07 10:15:29 +0200647struct tc_cls_common_offload {
Jiri Pirko5fd9fc42017-08-07 10:15:29 +0200648 u32 chain_index;
649 __be16 protocol;
Jiri Pirkod7c1c8d2017-08-07 10:15:30 +0200650 u32 prio;
Quentin Monnet8f0b4252018-01-19 17:44:47 -0800651 struct netlink_ext_ack *extack;
Jiri Pirko5fd9fc42017-08-07 10:15:29 +0200652};
653
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

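/* Illustrative sketch (hypothetical driver handler): drivers that only
 * support offloading chain 0 gate their classifier handlers with this
 * helper, which also fills in an extack message on failure:
 *
 *	static int my_setup_flower(struct my_priv *priv,
 *				   struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */
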
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

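/* Illustrative sketch (classifier side, following the flower pattern defined
 * below): the common fields are filled in before issuing an offload call
 * through tc_setup_cb_call():
 *
 *	struct tc_cls_flower_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = TC_CLSFLOWER_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *			       skip_sw);
 */
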
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}

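/* Illustrative sketch (hypothetical driver): the match is parsed through the
 * flow_rule API from <net/flow_offload.h>:
 *
 *	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		... inspect match.key->n_proto against match.mask->n_proto ...
 *	}
 */
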
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

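/* Illustrative sketch (hypothetical driver ->ndo_setup_tc(), assumed
 * helpers): RED offload requests arrive with type TC_SETUP_QDISC_RED and
 * are dispatched on the command:
 *
 *	struct tc_red_qopt_offload *opt = type_data;
 *
 *	switch (opt->command) {
 *	case TC_RED_REPLACE:
 *		return my_red_replace(priv, opt->handle, opt->parent,
 *				      &opt->set);
 *	case TC_RED_STATS:
 *		return my_red_stats(priv, opt->handle, &opt->stats);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */
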
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If a prio qdisc is offloaded and then changed to a non-offloadable
	 * configuration, it needs to update the backlog & qlen values to
	 * negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif