/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

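/* The helpers below are the driver-facing side of AF_XDP zero-copy support.
 * A rough sketch of how a driver typically uses them (details vary between
 * drivers):
 *
 * Rx: allocate buffers from the pool with xsk_buff_alloc(), post their DMA
 *     addresses (xsk_buff_xdp_get_dma()) to the hardware Rx ring, sync them
 *     with xsk_buff_dma_sync_for_cpu() before running the XDP program, and
 *     return dropped buffers with xsk_buff_free().
 *
 * Tx: pull descriptors from the Tx ring with xsk_tx_peek_desc() (or the
 *     batched variant), translate them with xsk_buff_raw_get_dma() and
 *     xsk_buff_raw_dma_sync_for_device(), and report completed descriptors
 *     back with xsk_tx_completed().
 */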
#ifdef CONFIG_XDP_SOCKETS

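/* Tx ring helpers: xsk_tx_peek_desc() and xsk_tx_peek_release_desc_batch()
 * hand out descriptors from the Tx ring, xsk_tx_release() commits the
 * consumed entries back to user space, and xsk_tx_completed() posts
 * nb_entries finished buffers to the completion ring.
 * xsk_get_pool_from_qid() looks up the buffer pool, if any, bound to a
 * given queue id of the device.
 */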
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
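/* The need_wakeup flags tell user space whether it must kick the kernel
 * (via poll() or sendmsg()) to make Rx/Tx progress. Drivers that support
 * the feature typically set a flag when the corresponding ring runs dry and
 * clear it once they are processing again; xsk_uses_need_wakeup() reports
 * whether the socket requested this mode.
 */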
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

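/* Pool geometry: each Rx buffer is one umem chunk. XDP_PACKET_HEADROOM plus
 * the user-configured headroom is reserved at the start of the chunk, so
 * the largest frame that can be received is the chunk size minus that
 * headroom.
 */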
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

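/* Register the driver's xdp_rxq_info with the pool so that buffers handed
 * out by the pool carry the correct Rx queue context.
 */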
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

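/* Map or unmap the umem pages backing the pool for DMA on the given device.
 * Typically done when the pool is enabled or disabled on a queue, before
 * any buffers are handed to the hardware.
 */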
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

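/* DMA addresses of a buffer from the pool: xsk_buff_xdp_get_dma() returns
 * the address of the packet data itself, while xsk_buff_xdp_get_frame_dma()
 * returns the address of the start of the frame, i.e. before the reserved
 * headroom.
 */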
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

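/* Rx buffer management: xsk_buff_alloc() hands out a buffer backed by an
 * entry from the fill ring (NULL if none is available),
 * xsk_buff_can_alloc() checks whether count allocations would currently
 * succeed, and xsk_buff_free() returns a buffer to the pool, e.g. when the
 * XDP program drops the packet.
 */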
static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

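/* Tx path address translation: the addr field of a Tx descriptor is an
 * offset into the umem; these helpers turn it into a DMA address for the
 * hardware and a kernel virtual address for the driver, respectively.
 */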
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

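/* DMA syncing: sync an Rx buffer for the CPU before inspecting its contents
 * (a no-op when the mapping does not need syncing), and sync a region for
 * the device before handing it to the hardware for transmission.
 */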
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

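/* Stubs for the !CONFIG_XDP_SOCKETS case: every helper above degenerates to
 * a no-op or a "nothing available" return value, so drivers can call them
 * unconditionally.
 */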
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc,
						 u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */