/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_atomic.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include <rte_bpf_ethdev.h>
#include "bpf_impl.h"

/*
 * information about installed BPF rx/tx callback
 */

struct bpf_eth_cbi {
	/* used by both data & control path */
	uint32_t use;    /* usage counter */
	const struct rte_eth_rxtx_callback *cb;  /* callback handle */
	struct rte_bpf *bpf;
	struct rte_bpf_jit jit;
	/* used by control path only */
	LIST_ENTRY(bpf_eth_cbi) link;
	uint16_t port;
	uint16_t queue;
} __rte_cache_aligned;

/*
 * Odd number means that callback is used by datapath.
 * Even number means that callback is not used by datapath.
 */
#define BPF_ETH_CBI_INUSE	1

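/*
 * Illustrative sketch of the counter protocol (not part of the original
 * code): it behaves like a seqlock, with bpf_eth_cbi_inuse()/_unuse()
 * on the datapath side and bpf_eth_cbi_wait() on the control path side.
 *
 *	datapath                        control path
 *	--------                        ------------
 *	use++;          (odd: in use)   puse = use;
 *	... run BPF over burst ...      if (puse & BPF_ETH_CBI_INUSE)
 *	use++;          (even: idle)            wait until use != puse;
 */
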
/*
 * List to manage RX/TX installed callbacks.
 */
LIST_HEAD(bpf_eth_cbi_list, bpf_eth_cbi);

enum {
	BPF_ETH_RX,
	BPF_ETH_TX,
	BPF_ETH_NUM,
};

/*
 * information about all installed BPF rx/tx callbacks
 */
struct bpf_eth_cbh {
	rte_spinlock_t lock;
	struct bpf_eth_cbi_list list;
	uint32_t type;
};

static struct bpf_eth_cbh rx_cbh = {
	.lock = RTE_SPINLOCK_INITIALIZER,
	.list = LIST_HEAD_INITIALIZER(list),
	.type = BPF_ETH_RX,
};

static struct bpf_eth_cbh tx_cbh = {
	.lock = RTE_SPINLOCK_INITIALIZER,
	.list = LIST_HEAD_INITIALIZER(list),
	.type = BPF_ETH_TX,
};

/*
 * Marks given callback as used by datapath.
 */
static __rte_always_inline void
bpf_eth_cbi_inuse(struct bpf_eth_cbi *cbi)
{
	cbi->use++;
	/* make sure no store/load reordering could happen */
	rte_smp_mb();
}

/*
 * Marks given callback as not used by datapath.
 */
static __rte_always_inline void
bpf_eth_cbi_unuse(struct bpf_eth_cbi *cbi)
{
	/* make sure all previous loads are completed */
	rte_smp_rmb();
	cbi->use++;
}

/*
 * Waits till the datapath has finished using the given callback.
 */
static void
bpf_eth_cbi_wait(const struct bpf_eth_cbi *cbi)
{
	uint32_t puse;

	/* make sure all previous loads and stores are completed */
	rte_smp_mb();

	puse = cbi->use;

	/* in use, busy wait till current RX/TX iteration is finished */
	if ((puse & BPF_ETH_CBI_INUSE) != 0) {
		RTE_WAIT_UNTIL_MASKED((uint32_t *)(uintptr_t)&cbi->use,
			UINT32_MAX, !=, puse, __ATOMIC_RELAXED);
	}
}

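/*
 * Note: with mask UINT32_MAX, RTE_WAIT_UNTIL_MASKED() above simply spins
 * until the whole 32-bit 'use' value differs from the sampled 'puse',
 * i.e. until the datapath has bumped the counter at least once more.
 */
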
static void
bpf_eth_cbi_cleanup(struct bpf_eth_cbi *bc)
{
	bc->bpf = NULL;
	memset(&bc->jit, 0, sizeof(bc->jit));
}

static struct bpf_eth_cbi *
bpf_eth_cbh_find(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
{
	struct bpf_eth_cbi *cbi;

	LIST_FOREACH(cbi, &cbh->list, link) {
		if (cbi->port == port && cbi->queue == queue)
			break;
	}
	return cbi;
}

static struct bpf_eth_cbi *
bpf_eth_cbh_add(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
{
	struct bpf_eth_cbi *cbi;

	/* return an existing one */
	cbi = bpf_eth_cbh_find(cbh, port, queue);
	if (cbi != NULL)
		return cbi;

	cbi = rte_zmalloc(NULL, sizeof(*cbi), RTE_CACHE_LINE_SIZE);
	if (cbi != NULL) {
		cbi->port = port;
		cbi->queue = queue;
		LIST_INSERT_HEAD(&cbh->list, cbi, link);
	}
	return cbi;
}

/*
 * BPF packet processing routines.
 */

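/*
 * apply_filter() below implements the common post-processing step:
 * packets with a non-zero BPF return value are compacted to the front
 * of mb[]; rejected ones are either freed (drop != 0) or moved behind
 * the accepted entries (drop == 0). Returns the accepted count.
 */
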
static inline uint32_t
apply_filter(struct rte_mbuf *mb[], const uint64_t rc[], uint32_t num,
	uint32_t drop)
{
	uint32_t i, j, k;
	struct rte_mbuf *dr[num];

	for (i = 0, j = 0, k = 0; i != num; i++) {
		/* filter matches */
		if (rc[i] != 0)
			mb[j++] = mb[i];
		/* no match */
		else
			dr[k++] = mb[i];
	}

	if (drop != 0) {
		/* free filtered out mbufs */
		for (i = 0; i != k; i++)
			rte_pktmbuf_free(dr[i]);
	} else {
		/* copy filtered out mbufs beyond good ones */
		for (i = 0; i != k; i++)
			mb[j + i] = dr[i];
	}

	return j;
}

static inline uint32_t
pkt_filter_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,
	uint32_t drop)
{
	uint32_t i;
	void *dp[num];
	uint64_t rc[num];

	for (i = 0; i != num; i++)
		dp[i] = rte_pktmbuf_mtod(mb[i], void *);

	rte_bpf_exec_burst(bpf, dp, rc, num);
	return apply_filter(mb, rc, num, drop);
}

static inline uint32_t
pkt_filter_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],
	uint32_t num, uint32_t drop)
{
	uint32_t i, n;
	void *dp;
	uint64_t rc[num];

	n = 0;
	for (i = 0; i != num; i++) {
		dp = rte_pktmbuf_mtod(mb[i], void *);
		rc[i] = jit->func(dp);
		n += (rc[i] == 0);
	}

	/* reshuffle the burst only if at least one packet was rejected */
	if (n != 0)
		num = apply_filter(mb, rc, num, drop);

	return num;
}

static inline uint32_t
pkt_filter_mb_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,
	uint32_t drop)
{
	uint64_t rc[num];

	rte_bpf_exec_burst(bpf, (void **)mb, rc, num);
	return apply_filter(mb, rc, num, drop);
}

static inline uint32_t
pkt_filter_mb_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],
	uint32_t num, uint32_t drop)
{
	uint32_t i, n;
	uint64_t rc[num];

	n = 0;
	for (i = 0; i != num; i++) {
		rc[i] = jit->func(mb[i]);
		n += (rc[i] == 0);
	}

	/* reshuffle the burst only if at least one packet was rejected */
	if (n != 0)
		num = apply_filter(mb, rc, num, drop);

	return num;
}

/*
 * RX/TX callbacks for raw data bpf.
 */

static uint16_t
bpf_rx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;

	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_vm(cbi->bpf, pkt, nb_pkts, 1) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_rx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_jit(&cbi->jit, pkt, nb_pkts, 1) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_tx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_vm(cbi->bpf, pkt, nb_pkts, 0) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_tx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_jit(&cbi->jit, pkt, nb_pkts, 0) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

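/*
 * Note the last argument of the pkt_filter_*() calls above: RX callbacks
 * run with drop == 1, so rejected packets are freed before the application
 * sees them; TX callbacks run with drop == 0, keeping rejected mbufs in
 * the array behind the accepted ones, and only the returned count is
 * handed on for transmission.
 */
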
/*
 * RX/TX callbacks for mbuf.
 */

static uint16_t
bpf_rx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 1) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_rx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts,
	__rte_unused uint16_t max_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 1) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_tx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 0) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static uint16_t
bpf_tx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
	struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
{
	struct bpf_eth_cbi *cbi;
	uint16_t rc;

	cbi = user_param;
	bpf_eth_cbi_inuse(cbi);
	rc = (cbi->cb != NULL) ?
		pkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 0) :
		nb_pkts;
	bpf_eth_cbi_unuse(cbi);
	return rc;
}

static rte_rx_callback_fn
select_rx_callback(enum rte_bpf_arg_type type, uint32_t flags)
{
	if (flags & RTE_BPF_ETH_F_JIT) {
		if (type == RTE_BPF_ARG_PTR)
			return bpf_rx_callback_jit;
		else if (type == RTE_BPF_ARG_PTR_MBUF)
			return bpf_rx_callback_mb_jit;
	} else if (type == RTE_BPF_ARG_PTR)
		return bpf_rx_callback_vm;
	else if (type == RTE_BPF_ARG_PTR_MBUF)
		return bpf_rx_callback_mb_vm;

	return NULL;
}

static rte_tx_callback_fn
select_tx_callback(enum rte_bpf_arg_type type, uint32_t flags)
{
	if (flags & RTE_BPF_ETH_F_JIT) {
		if (type == RTE_BPF_ARG_PTR)
			return bpf_tx_callback_jit;
		else if (type == RTE_BPF_ARG_PTR_MBUF)
			return bpf_tx_callback_mb_jit;
	} else if (type == RTE_BPF_ARG_PTR)
		return bpf_tx_callback_vm;
	else if (type == RTE_BPF_ARG_PTR_MBUF)
		return bpf_tx_callback_mb_vm;

	return NULL;
}

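/*
 * Both selectors return NULL for an unsupported arg type/flags
 * combination; bpf_eth_elf_load() below reports that as -EINVAL.
 */
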
/*
 * Helper function to perform BPF unload for given port/queue.
 * We have to introduce extra complexity (and a possible slowdown) here,
 * as right now there is no safe generic way to remove an RX/TX callback
 * while IO is active.
 * We still don't free the memory allocated for the callback handle itself;
 * again, right now there is no safe way to do that without first stopping
 * RX/TX on the given port/queue.
 */

static void
bpf_eth_cbi_unload(struct bpf_eth_cbi *bc)
{
	/* mark this cbi as empty */
	bc->cb = NULL;
	rte_smp_mb();

	/* make sure datapath doesn't use bpf anymore, then destroy bpf */
	bpf_eth_cbi_wait(bc);
	rte_bpf_destroy(bc->bpf);
	bpf_eth_cbi_cleanup(bc);
}

static void
bpf_eth_unload(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
{
	struct bpf_eth_cbi *bc;

	bc = bpf_eth_cbh_find(cbh, port, queue);
	if (bc == NULL || bc->cb == NULL)
		return;

	if (cbh->type == BPF_ETH_RX)
		rte_eth_remove_rx_callback(port, queue, bc->cb);
	else
		rte_eth_remove_tx_callback(port, queue, bc->cb);

	bpf_eth_cbi_unload(bc);
}

void
rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)
{
	struct bpf_eth_cbh *cbh;

	cbh = &rx_cbh;
	rte_spinlock_lock(&cbh->lock);
	bpf_eth_unload(cbh, port, queue);
	rte_spinlock_unlock(&cbh->lock);
}

void
rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)
{
	struct bpf_eth_cbh *cbh;

	cbh = &tx_cbh;
	rte_spinlock_lock(&cbh->lock);
	bpf_eth_unload(cbh, port, queue);
	rte_spinlock_unlock(&cbh->lock);
}

static int
bpf_eth_elf_load(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue,
	const struct rte_bpf_prm *prm, const char *fname, const char *sname,
	uint32_t flags)
{
	int32_t rc;
	struct bpf_eth_cbi *bc;
	struct rte_bpf *bpf;
	rte_rx_callback_fn frx;
	rte_tx_callback_fn ftx;
	struct rte_bpf_jit jit;

	frx = NULL;
	ftx = NULL;

	if (prm == NULL || rte_eth_dev_is_valid_port(port) == 0 ||
			queue >= RTE_MAX_QUEUES_PER_PORT)
		return -EINVAL;

	if (cbh->type == BPF_ETH_RX)
		frx = select_rx_callback(prm->prog_arg.type, flags);
	else
		ftx = select_tx_callback(prm->prog_arg.type, flags);

	if (frx == NULL && ftx == NULL) {
		RTE_BPF_LOG(ERR, "%s(%u, %u): no callback selected;\n",
			__func__, port, queue);
		return -EINVAL;
	}

	bpf = rte_bpf_elf_load(prm, fname, sname);
	if (bpf == NULL)
		return -rte_errno;

	rte_bpf_get_jit(bpf, &jit);

	if ((flags & RTE_BPF_ETH_F_JIT) != 0 && jit.func == NULL) {
		RTE_BPF_LOG(ERR, "%s(%u, %u): no JIT generated;\n",
			__func__, port, queue);
		rte_bpf_destroy(bpf);
		return -ENOTSUP;
	}

	/* setup/update global callback info */
	bc = bpf_eth_cbh_add(cbh, port, queue);
	if (bc == NULL)
		return -ENOMEM;

	/* remove old one, if any */
	bpf_eth_unload(cbh, port, queue);

	bc->bpf = bpf;
	bc->jit = jit;

	if (cbh->type == BPF_ETH_RX)
		bc->cb = rte_eth_add_rx_callback(port, queue, frx, bc);
	else
		bc->cb = rte_eth_add_tx_callback(port, queue, ftx, bc);

	if (bc->cb == NULL) {
		rc = -rte_errno;
		rte_bpf_destroy(bpf);
		bpf_eth_cbi_cleanup(bc);
	} else
		rc = 0;

	return rc;
}

int
rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
	const struct rte_bpf_prm *prm, const char *fname, const char *sname,
	uint32_t flags)
{
	int32_t rc;
	struct bpf_eth_cbh *cbh;

	cbh = &rx_cbh;
	rte_spinlock_lock(&cbh->lock);
	rc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);
	rte_spinlock_unlock(&cbh->lock);

	return rc;
}

int
rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
	const struct rte_bpf_prm *prm, const char *fname, const char *sname,
	uint32_t flags)
{
	int32_t rc;
	struct bpf_eth_cbh *cbh;

	cbh = &tx_cbh;
	rte_spinlock_lock(&cbh->lock);
	rc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);
	rte_spinlock_unlock(&cbh->lock);

	return rc;
}
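
/*
 * Minimal usage sketch (hypothetical application code, not part of this
 * file), assuming an ELF object "filter.o" whose section ".text" holds a
 * BPF program taking a pointer to packet data; port_id/queue_id are the
 * application's own variables:
 *
 *	struct rte_bpf_prm prm = {
 *		.prog_arg = {
 *			.type = RTE_BPF_ARG_PTR,
 *			.size = RTE_MBUF_DEFAULT_DATAROOM,
 *		},
 *	};
 *
 *	if (rte_bpf_eth_rx_elf_load(port_id, queue_id, &prm,
 *			"filter.o", ".text", RTE_BPF_ETH_F_JIT) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot install BPF filter\n");
 *	...
 *	rte_bpf_eth_rx_unload(port_id, queue_id);
 */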