/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

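/* Per-page view of the UMEM: the page's kernel virtual address and, once the
 * UMEM has been DMA mapped for zero-copy, its device (bus) address.
 */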
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

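/* Stash of FILL queue addresses that a driver consumed but could not use
 * (e.g. buffers still posted to hardware when a ring is torn down). The
 * *_rq helpers below hand these out again before touching the FILL queue,
 * so the addresses are not lost to user space.
 */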
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

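/* The registered user memory region. fq and cq are the FILL and COMPLETION
 * rings, pages[] describes each page of the region, and xsk_list holds all
 * sockets bound to this UMEM.
 */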
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	struct pid *pid;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

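/* An AF_XDP socket: carries the RX and TX descriptor rings and a reference
 * to the UMEM it is bound to.
 */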
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
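
/* Rough sketch of how a zero-copy driver's TX path is expected to drive the
 * helpers above; my_dev and the my_tx_ring_*() calls are hypothetical driver
 * internals, not part of this API:
 *
 *	dma_addr_t dma;
 *	u32 len;
 *
 *	while (my_tx_ring_has_room(my_dev) &&
 *	       xsk_umem_consume_tx(my_dev->umem, &dma, &len))
 *		my_tx_ring_post(my_dev, dma, len);
 *	xsk_umem_consume_tx_done(my_dev->umem);	// done pulling descriptors
 *
 *	// later, when hardware reports n frames sent:
 *	xsk_umem_complete_tx(my_dev->umem, n);	// release them on the COMPLETION ring
 */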

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
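
/* The 64-bit UMEM address is a byte offset into the registered region; the
 * helpers above split it into a page index (addr >> PAGE_SHIFT) and an
 * in-page offset (addr & (PAGE_SIZE - 1)). Illustrative use, with addr taken
 * from xsk_umem_peek_addr():
 *
 *	char *pkt = xdp_umem_get_data(umem, addr);	// CPU view of the frame
 *	dma_addr_t dma = xdp_umem_get_dma(umem, addr);	// device view of the same frame
 */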

/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
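
/* Sketch of the intended RX refill pattern (rxr and the my_rx_ring_*() calls
 * are hypothetical driver names): take addresses from the reuse stack first,
 * fall back to the FILL queue, and park anything the driver still holds so
 * it is not lost:
 *
 *	u64 addr;
 *
 *	while (my_rx_ring_has_room(rxr) && xsk_umem_peek_addr_rq(umem, &addr)) {
 *		my_rx_ring_post(rxr, xdp_umem_get_dma(umem, addr));
 *		xsk_umem_discard_addr_rq(umem);
 *	}
 *
 *	// on teardown, return addresses still owned by the driver:
 *	xsk_umem_fq_reuse(umem, addr);
 */
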
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */