/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#include "cn9k_cryptodev_ops.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

/* SSO Operations */

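/* Enqueue a new event to an SSO group via ADD_WORK. Returns 0 when the
 * number of in-flight events has reached the XAQ limit (back-pressure),
 * 1 on success.
 */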
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
	return 1;
}

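/* Forward an event within the current group by switching the work-slot
 * tag. The tag-type table below selects between a normal tag switch and
 * an untag operation.
 */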
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
		       const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

	/*
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm             untag
	 * SSO_TT_ATOMIC         norm           norm             untag
	 * SSO_TT_UNTAGGED       norm           norm             NOOP
	 */

	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(
				CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
				SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
	}
}

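/* Forward an event to a different group: latch the work-queue pointer for
 * the descheduled work, then issue SWTAG_DESCHED so the SSO re-arbitrates
 * the event to the new group.
 */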
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
		       const struct rte_event *ev, const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
				     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

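/* Forward an event: if the group is unchanged, a tag switch suffices;
 * otherwise fall back to the deschedule/add_work path.
 */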
static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
		cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
		ws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group-based work pipelining;
		 * use the deschedule/add_work operation to transfer the
		 * event to the new group/core.
		 */
		cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
				       grp);
	}
}

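/* Dual-workslot variants of the enqueue helpers. */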
static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
			    const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (dws->xaq_lmt <= *dws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
	return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
				struct cn9k_sso_hws_state *vws,
				const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
		cn9k_sso_hws_fwd_swtag(vws, ev);
		dws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group-based work pipelining;
		 * use the deschedule/add_work operation to transfer the
		 * event to the new group/core.
		 */
		cn9k_sso_hws_fwd_group(vws, ev, grp);
	}
}

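/* Convert a received NIX work-queue entry into an rte_mbuf. mbuf_init
 * packs the mbuf rearm data: data_off = headroom (+8 when a PTP timestamp
 * is prepended), refcnt = 1, nb_segs = 1, with the port id folded into
 * bits 63:48.
 */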
static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		 const uint32_t tag, const uint32_t flags,
		 const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			     (struct rte_mbuf *)mbuf, lookup_mem,
			     mbuf_init | ((uint64_t)port_id) << 48, flags);
}

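/* Dual-workslot dequeue: poll @ws for completed work while kicking off
 * the next GETWORK on the paired slot @ws_pair, so the two slots
 * ping-pong and the get-work latency is hidden.
 */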
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
			   struct cn9k_sso_hws_state *ws_pair,
			   struct rte_event *ev, const uint32_t flags,
			   const void *const lookup_mem,
			   struct cnxk_timesync_info *const tstamp)
{
	const uint64_t set_gw = BIT_ULL(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "rty%=:				\n"
		     "	ldr %[tag], [%[tag_loc]]	\n"
		     "	ldr %[wqp], [%[wqp_loc]]	\n"
		     "	tbnz %[tag], 63, rty%=		\n"
		     "done%=:	str %[gw], [%[pong]]	\n"
		     "	dmb ld				\n"
		     "	sub %[mbuf], %[wqp], #0x80	\n"
		     "	prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
		       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);
	gw.u64[1] = plt_read64(ws->wqp_op);
	plt_write64(set_gw, ws_pair->getwrk_op);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	/* Fold the SSO get-work response into rte_event layout: move the
	 * tag type into sched_type and the group into queue_id, keeping
	 * the lower 32-bit tag word.
	 */
	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 lookup_mem);
			/* Extract timestamp, if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

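/* Single-workslot dequeue: issue GETWORK (wait for work, mask-set 0),
 * poll for completion and translate the response into an rte_event.
 */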
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
		      const uint32_t flags, const void *const lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	plt_write64(BIT_ULL(16) | /* wait for work. */
			    1,	  /* Use Mask set 0. */
		    ws->getwrk_op);

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "	ldr %[tag], [%[tag_loc]]	\n"
		     "	ldr %[wqp], [%[wqp_loc]]	\n"
		     "	tbz %[tag], 63, done%=		\n"
		     "	sevl				\n"
		     "rty%=:	wfe			\n"
		     "	ldr %[tag], [%[tag_loc]]	\n"
		     "	ldr %[wqp], [%[wqp_loc]]	\n"
		     "	tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld			\n"
		     "	sub %[mbuf], %[wqp], #0x80	\n"
		     "	prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);

	gw.u64[1] = plt_read64(ws->wqp_op);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 lookup_mem);
			/* Extract timestamp, if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "	ldr %[tag], [%[tag_loc]]	\n"
		     "	ldr %[wqp], [%[wqp_loc]]	\n"
		     "	tbz %[tag], 63, done%=		\n"
		     "	sevl				\n"
		     "rty%=:	wfe			\n"
		     "	ldr %[tag], [%[tag_loc]]	\n"
		     "	ldr %[wqp], [%[wqp_loc]]	\n"
		     "	tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld			\n"
		     "	sub %[mbuf], %[wqp], #0x80	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);

	gw.u64[1] = plt_read64(ws->wqp_op);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
					  const struct rte_event ev[],
					  uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
					 const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
				       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
					    uint16_t nb_events);

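/* Declare every Rx dequeue variant. R() is expanded once per entry in
 * NIX_RX_FASTPATH_MODES, emitting plain, timeout (tmo), crypto adapter
 * (ca) and segmented (seg) flavours for each Rx offload flag combination.
 */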
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name(                         \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name(                   \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name(                     \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

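/* Dual-workslot flavours of the same dequeue variants. */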
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name(                \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
	/* Block while the queue's flow-control counter reports no free
	 * SQE slots.
	 */
	while (!((txq->nb_sqb_bufs_adj -
		  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
		 << (txq)->sqes_per_sqb_log2))
		;
}

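/* Resolve the transmit queue for an mbuf from the Tx adapter's per-port
 * lookup table, indexed by the queue id stashed in the mbuf and read back
 * via rte_event_eth_tx_adapter_txq_get().
 */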
static __rte_always_inline const struct cn9k_eth_txq *
cn9k_sso_hws_xtract_meta(struct rte_mbuf *m,
			 const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct cn9k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

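/* Copy the queue's precomputed command template and fill in the send
 * descriptor for this mbuf.
 */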
static __rte_always_inline void
cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
			 uint64_t *cmd, const uint32_t flags)
{
	roc_lmt_mov(cmd, txq->cmd, cn9k_nix_tx_ext_subs(flags));
	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}

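/* Transmit the mbuf carried by @ev. Ordered events (tag type 0) wait for
 * this work slot to reach the head of the order chain before the LMT
 * submit; other tag types transmit directly.
 */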
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
		      const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		      const uint32_t flags)
{
	struct rte_mbuf *m = ev->mbuf;
	const struct cn9k_eth_txq *txq;
	uint16_t ref_cnt = m->refcnt;

	/* Perform header writes before barrier for TSO */
	cn9k_nix_xmit_prepare_tso(m, flags);
	/* Commit any changes to the packet here when fast free is set, as
	 * no further changes will be made to the mbuf. When fast free is
	 * not set, both cn9k_nix_prepare_mseg() and cn9k_nix_xmit_prepare()
	 * have a barrier after the refcnt update.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();
	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
	cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);

	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
			roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
						       txq->io_addr, segdw);
		} else {
			cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
					       segdw);
		}
	} else {
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
			roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_one(cmd, txq->lmt_addr,
						  txq->io_addr, flags);
		} else {
			cn9k_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
					  flags);
		}
	}

	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
				 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}

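/* Declare every Tx adapter enqueue variant. T() is expanded once per
 * entry in NIX_TX_FASTPATH_MODES, emitting plain and segmented flavours
 * for single and dual workslots.
 */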
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif