/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__

#include "cn10k_cryptodev_ops.h"
#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
                              ws->grp_base + (grp << 12));
        return 1;
}

static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = CNXK_TT_FROM_TAG(ws->gw_rdata);

        /* CNXK model
         * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
         *
         * SSO_TT_ORDERED        norm           norm           untag
         * SSO_TT_ATOMIC         norm           norm           untag
         * SSO_TT_UNTAGGED       norm           norm           NOOP
         */
        if (new_tt == SSO_TT_UNTAGGED) {
                if (cur_tt != SSO_TT_UNTAGGED)
                        cnxk_sso_hws_swtag_untag(ws->base +
                                                 SSOW_LF_GWS_OP_SWTAG_UNTAG);
        } else {
                cnxk_sso_hws_swtag_norm(tag, new_tt,
                                        ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
        }
        ws->swtag_req = 1;
}

static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
                        const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
                                   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}

static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
                            const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, Use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(ws->gw_rdata) == grp)
                cn10k_sso_hws_fwd_swtag(ws, ev);
        else
                /*
                 * Group has been changed for group based work pipelining,
                 * Use deschedule/add_work operation to transfer the event to
                 * the new group/core.
                 */
                cn10k_sso_hws_fwd_group(ws, ev, grp);
}
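
/*
 * Illustrative sketch (not part of the upstream sources): a single-event
 * enqueue fast path is expected to dispatch on ev->op roughly as below.
 * RTE_EVENT_OP_RELEASE handling (tag flush on the workslot) is omitted here.
 *
 *	if (ev->op == RTE_EVENT_OP_NEW)
 *		return cn10k_sso_hws_new_event(ws, ev);
 *	else if (ev->op == RTE_EVENT_OP_FORWARD)
 *		cn10k_sso_hws_forward_event(ws, ev);
 *	return 1;
 */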

static __rte_always_inline void
cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
                  const uint32_t tag, const uint32_t flags,
                  const void *const lookup_mem)
{
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
        struct rte_mbuf *mbuf = (struct rte_mbuf *)__mbuf;

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

        cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                              (struct rte_mbuf *)mbuf, lookup_mem,
                              mbuf_init | ((uint64_t)port_id) << 48, flags);
}

static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
                   void *lookup_mem, void *tstamp, uintptr_t lbase)
{
        uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                             (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
        struct rte_event_vector *vec;
        uint64_t aura_handle, laddr;
        uint16_t nb_mbufs, non_vec;
        uint16_t lmt_id, d_off;
        struct rte_mbuf *mbuf;
        uint64_t sa_base = 0;
        uint8_t loff = 0;
        uint64_t **wqe;
        int i;

        mbuf_init |= ((uint64_t)port_id) << 48;
        vec = (struct rte_event_vector *)vwqe;
        wqe = (uint64_t **)vec->mbufs;

        rte_prefetch_non_temporal(&vec->ptrs[0]);
#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *))
        for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
                rte_prefetch_non_temporal(&vec->ptrs[i]);

        nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
        nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, vec->mbufs, nb_mbufs,
                                              flags | NIX_RX_VWQE_F, lookup_mem,
                                              tstamp, lbase);
        wqe += nb_mbufs;
        non_vec = vec->nb_elem - nb_mbufs;

        if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
                mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
                                           sizeof(struct rte_mbuf));

                /* Pick first mbuf's aura handle assuming all
                 * mbufs are from a vec and are from same RQ.
                 */
                aura_handle = mbuf->pool->pool_id;
                ROC_LMT_BASE_ID_GET(lbase, lmt_id);
                laddr = lbase;
                laddr += 8;
                d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
                d_off += (mbuf_init & 0xFFFF);
                sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
                sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
        }

        while (non_vec) {
                struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
                uint64_t tstamp_ptr;

                mbuf = (struct rte_mbuf *)((char *)cqe -
                                           sizeof(struct rte_mbuf));

                /* Mark mempool obj as "get" as it is alloc'ed by NIX */
                RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

                /* Translate meta to mbuf */
                if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
                        const uint64_t cq_w1 = *((const uint64_t *)cqe + 1);

                        mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, sa_base, laddr,
                                                       &loff, mbuf, d_off);
                }

                cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
                                      mbuf_init, flags);
                /* Extracting tstamp, if PTP enabled*/
                tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
                                           CNXK_SSO_WQE_SG_PTR);
                cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
                                        flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                        (uint64_t *)tstamp_ptr);
                wqe[0] = (uint64_t *)mbuf;
                non_vec--;
                wqe++;
        }

        /* Free remaining meta buffers if any */
        if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
                nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
                plt_io_wmb();
        }
}
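
/*
 * Note (summary of the helper above, not upstream text): a vector WQE is
 * rewritten in place; multiples of NIX_DESCS_PER_LOOP CQEs go through the
 * vectorized cn10k_nix_recv_pkts_vector() path and the remainder is converted
 * CQE-by-CQE, with inline-IPsec meta packets translated back to their inner
 * mbuf and any leftover meta buffers batch-freed to their aura.
 */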

static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
                       const uint32_t flags, void *lookup_mem)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;

        gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
        asm volatile(
                PLT_CPU_FEATURE_PREAMBLE
                "caspal %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
                : [wdata] "+r"(gw.get_work)
                : [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
                : "memory");
#else
        plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
        do {
                roc_load_pair(gw.u64[0], gw.u64[1],
                              ws->base + SSOW_LF_GWS_WQE0);
        } while (gw.u64[0] & BIT_ULL(63));
#endif
        ws->gw_rdata = gw.u64[0];
        if (gw.u64[1]) {
                gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                            (gw.u64[0] & (0x3FFull << 36)) << 4 |
                            (gw.u64[0] & 0xffffffff);
                if ((flags & CPT_RX_WQE_F) &&
                    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                     RTE_EVENT_TYPE_CRYPTODEV)) {
                        gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
                } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                           RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
                        uint64_t mbuf;

                        mbuf = gw.u64[1] - sizeof(struct rte_mbuf);
                        rte_prefetch0((void *)mbuf);
                        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
                                uint64_t iova = 0;
                                uint8_t loff = 0;
                                struct rte_mbuf *m;
                                uintptr_t sa_base;
                                uint16_t d_off;
                                uint64_t cq_w1;

                                m = (struct rte_mbuf *)mbuf;
                                d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
                                d_off += RTE_PKTMBUF_HEADROOM;

                                cq_w1 = *(uint64_t *)(gw.u64[1] + 8);

                                sa_base =
                                        cnxk_nix_sa_base_get(port, lookup_mem);
                                sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);

                                mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
                                        cq_w1, sa_base, (uintptr_t)&iova, &loff,
                                        (struct rte_mbuf *)mbuf, d_off);
                                if (loff)
                                        roc_npa_aura_op_free(m->pool->pool_id,
                                                             0, iova);
                        }

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                          gw.u64[0] & 0xFFFFF, flags,
                                          lookup_mem);
                        /* Extracting tstamp, if PTP enabled*/
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
                                                ws->tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                           RTE_EVENT_TYPE_ETHDEV_VECTOR) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
                        __uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];

                        vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
                                   ((vwqe_hdr & 0xFFFF) << 48) |
                                   ((uint64_t)port << 32);
                        *(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
                        cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
                                           ws->tstamp, ws->lmt_base);
                        /* Mark vector mempool object as get */
                        RTE_MEMPOOL_CHECK_COOKIES(
                                rte_mempool_from_obj((void *)gw.u64[1]),
                                (void **)&gw.u64[1], 1, 1);
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}
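
/*
 * Note (derived from the repacking in cn10k_sso_hws_get_work(), not upstream
 * documentation): the GWS word returned by hardware keeps the tag in its low
 * 32 bits, the tag type at bits [33:32] and the group starting at bit 36.
 * The shift-and-mask sequence above moves the tag type to line up with
 * rte_event::sched_type (bit 38) and the group with rte_event::queue_id
 * (bit 40), while the low 32 bits are kept as-is, so the word can be stored
 * directly to ev->event.
 */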

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldp %[tag], %[wqp], [%[tag_loc]]        \n"
                     "          tbz %[tag], 63, done%=                  \n"
                     "          sevl                                    \n"
                     "rty%=:    wfe                                     \n"
                     "          ldp %[tag], %[wqp], [%[tag_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=                  \n"
                     "done%=:   dmb ld                                  \n"
                     "          sub %[mbuf], %[wqp], #0x80              \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
                     : "memory");
#else
        do {
                roc_load_pair(gw.u64[0], gw.u64[1],
                              ws->base + SSOW_LF_GWS_WQE0);
        } while (gw.u64[0] & BIT_ULL(63));
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                          gw.u64[0] & 0xFFFFF, 0, NULL);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* CN10K Fastpath functions. */
uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
                                           const struct rte_event ev[],
                                           uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
                                        uint16_t nb_events);

#define R(name, flags)                                                         \
        uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_ca_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_seg_##name(                \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn10k_sso_hws_deq_tmo_ca_seg_burst_##name(          \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define SSO_DEQ(fn, flags)                                                     \
        uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
                              uint64_t timeout_ticks)                          \
        {                                                                      \
                struct cn10k_sso_hws *ws = port;                               \
                RTE_SET_USED(timeout_ticks);                                   \
                if (ws->swtag_req) {                                           \
                        ws->swtag_req = 0;                                     \
                        ws->gw_rdata = cnxk_sso_hws_swtag_wait(                \
                                ws->base + SSOW_LF_GWS_WQE0);                  \
                }                                                              \
                return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);  \
        }

#define SSO_DEQ_SEG(fn, flags)    SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DEQ_CA(fn, flags)     SSO_DEQ(fn, flags | CPT_RX_WQE_F)
#define SSO_DEQ_CA_SEG(fn, flags) SSO_DEQ_SEG(fn, flags | CPT_RX_WQE_F)

#define SSO_DEQ_TMO(fn, flags)                                                 \
        uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
                              uint64_t timeout_ticks)                          \
        {                                                                      \
                struct cn10k_sso_hws *ws = port;                               \
                uint16_t ret = 1;                                              \
                uint64_t iter;                                                 \
                if (ws->swtag_req) {                                           \
                        ws->swtag_req = 0;                                     \
                        ws->gw_rdata = cnxk_sso_hws_swtag_wait(                \
                                ws->base + SSOW_LF_GWS_WQE0);                  \
                }                                                              \
                ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
                for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
                        ret = cn10k_sso_hws_get_work(ws, ev, flags,            \
                                                     ws->lookup_mem);          \
                return ret;                                                    \
        }

#define SSO_DEQ_TMO_SEG(fn, flags)    SSO_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DEQ_TMO_CA(fn, flags)     SSO_DEQ_TMO(fn, flags | CPT_RX_WQE_F)
#define SSO_DEQ_TMO_CA_SEG(fn, flags) SSO_DEQ_TMO_SEG(fn, flags | CPT_RX_WQE_F)

#define SSO_CMN_DEQ_BURST(fnb, fn, flags)                                      \
        uint16_t __rte_hot fnb(void *port, struct rte_event ev[],              \
                               uint16_t nb_events, uint64_t timeout_ticks)     \
        {                                                                      \
                RTE_SET_USED(nb_events);                                       \
                return fn(port, ev, timeout_ticks);                            \
        }

#define SSO_CMN_DEQ_SEG_BURST(fnb, fn, flags)                                  \
        uint16_t __rte_hot fnb(void *port, struct rte_event ev[],              \
                               uint16_t nb_events, uint64_t timeout_ticks)     \
        {                                                                      \
                RTE_SET_USED(nb_events);                                       \
                return fn(port, ev, timeout_ticks);                            \
        }
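
/*
 * Illustrative sketch (the mode name and flags below are hypothetical): the
 * per-mode dequeue symbols declared through R() above are expected to be
 * defined in generated source files by instantiating these macros, e.g.
 *
 *	SSO_DEQ(cn10k_sso_hws_deq_<mode>, <mode_flags>)
 *	SSO_CMN_DEQ_BURST(cn10k_sso_hws_deq_burst_<mode>,
 *			  cn10k_sso_hws_deq_<mode>, <mode_flags>)
 *
 * where <mode>/<mode_flags> come from one NIX_RX_FASTPATH_MODES entry.
 */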

static __rte_always_inline struct cn10k_eth_txq *
cn10k_sso_hws_xtract_meta(struct rte_mbuf *m, const uint64_t *txq_data)
{
        return (struct cn10k_eth_txq
                        *)(txq_data[(txq_data[m->port] >> 48) +
                                    rte_event_eth_tx_adapter_txq_get(m)] &
                           (BIT_ULL(48) - 1));
}
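
/*
 * Note (inferred from the lookup above and in cn10k_sso_hws_event_tx(), not
 * upstream documentation): ws->tx_adptr_data is treated as a flat uint64_t
 * table where entry [port] keeps a per-port base offset in its upper 16 bits;
 * adding the Tx queue id to that offset selects the entry whose lower 48 bits
 * hold the struct cn10k_eth_txq pointer for that (port, queue) pair.
 */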

static __rte_always_inline void
cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
{
        while ((uint64_t)txq->nb_sqb_bufs_adj <=
               __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
                ;
}

static __rte_always_inline void
cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
                 uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
                 const uint64_t *txq_data, const uint32_t flags)
{
        uint8_t lnum = 0, loff = 0, shft = 0;
        struct cn10k_eth_txq *txq;
        uint64_t laddr;
        uint16_t segdw;
        uintptr_t pa;
        bool sec;

        txq = cn10k_sso_hws_xtract_meta(m, txq_data);
        cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
        /* Perform header writes before barrier
         * for TSO
         */
        if (flags & NIX_TX_OFFLOAD_TSO_F)
                cn10k_nix_xmit_prepare_tso(m, flags);

        cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);

        laddr = lmt_addr;

        /* Prepare CPT instruction and get nixtx addr if
         * it is for CPT on same lmtline.
         */
        if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
                cn10k_nix_prep_sec(m, cmd, &laddr, lmt_addr, &lnum, &loff,
                                   &shft, txq->sa_base, flags);

        /* Move NIX desc to LMT/NIXTX area */
        cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);

        if (flags & NIX_TX_MULTI_SEG_F)
                segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)laddr, flags);
        else
                segdw = cn10k_nix_tx_ext_subs(flags) + 2;

        cn10k_nix_xmit_prepare_tstamp(txq, laddr, m->ol_flags, segdw, flags);
        if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
                pa = txq->cpt_io_addr | 3 << 4;
        else
                pa = txq->io_addr | ((segdw - 1) << 4);

        if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
                ws->gw_rdata = roc_sso_hws_head_wait(ws->base);

        cn10k_sso_txq_fc_wait(txq);
        roc_lmt_submit_steorl(lmt_id, pa);
}
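
/*
 * Note (summary of cn10k_sso_tx_one(), not upstream text): the descriptor is
 * built in the per-worker LMT line, ordered behind a head-of-schedule wait
 * for ordered events, throttled against the SQB flow-control counter and
 * finally kicked off with a single steorl submit to the NIX/CPT doorbell.
 */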

static __rte_always_inline void
cn10k_sso_vwqe_split_tx(struct cn10k_sso_hws *ws, struct rte_mbuf **mbufs,
                        uint16_t nb_mbufs, uint64_t *cmd, uint16_t lmt_id,
                        uintptr_t lmt_addr, uint8_t sched_type,
                        const uint64_t *txq_data, const uint32_t flags)
{
        uint16_t port[4], queue[4];
        uint16_t i, j, pkts, scalar;
        struct cn10k_eth_txq *txq;

        scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);
        pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);

        for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
                port[0] = mbufs[i]->port;
                port[1] = mbufs[i + 1]->port;
                port[2] = mbufs[i + 2]->port;
                port[3] = mbufs[i + 3]->port;

                queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
                queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
                queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
                queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);

                if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
                    ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
                        for (j = 0; j < 4; j++)
                                cn10k_sso_tx_one(ws, mbufs[i + j], cmd, lmt_id,
                                                 lmt_addr, sched_type, txq_data,
                                                 flags);
                } else {
                        txq = (struct cn10k_eth_txq
                                       *)(txq_data[(txq_data[port[0]] >> 48) +
                                                   queue[0]] &
                                          (BIT_ULL(48) - 1));
                        cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
                                                   &mbufs[i], 4, cmd,
                                                   flags | NIX_TX_VWQE_F);
                }
        }

        mbufs += i;

        for (i = 0; i < scalar; i++) {
                cn10k_sso_tx_one(ws, mbufs[i], cmd, lmt_id, lmt_addr,
                                 sched_type, txq_data, flags);
        }
}

static __rte_always_inline uint16_t
cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
                       uint64_t *cmd, const uint64_t *txq_data,
                       const uint32_t flags)
{
        struct cn10k_eth_txq *txq;
        struct rte_mbuf *m;
        uintptr_t lmt_addr;
        uint16_t lmt_id;

        lmt_addr = ws->lmt_base;
        ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);

        if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
                struct rte_mbuf **mbufs = ev->vec->mbufs;
                uint64_t meta = *(uint64_t *)ev->vec;

                if (meta & BIT(31)) {
                        txq = (struct cn10k_eth_txq
                                       *)(txq_data[(txq_data[meta >> 32] >>
                                                    48) +
                                                   (meta >> 48)] &
                                          (BIT_ULL(48) - 1));
                        cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, mbufs,
                                                   meta & 0xFFFF, cmd,
                                                   flags | NIX_TX_VWQE_F);
                } else {
                        cn10k_sso_vwqe_split_tx(
                                ws, mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
                                ev->sched_type, txq_data, flags);
                }
                rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
                return (meta & 0xFFFF);
        }

        m = ev->mbuf;
        cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
                         flags);

        return 1;
}
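
/*
 * Note (based on the public struct rte_event_vector layout, not upstream
 * text): the 64-bit word loaded from ev->vec above packs nb_elem in bits
 * [15:0], the attr_valid flag at bit 31 and the common port/queue attributes
 * in bits [47:32]/[63:48]; attr_valid set means all mbufs share one Tx queue,
 * so the vector is handed to cn10k_nix_xmit_pkts_vector() directly instead of
 * being split per (port, queue) by cn10k_sso_vwqe_split_tx().
 */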

#define T(name, sz, flags)                                                     \
        uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events);        \
        uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#define SSO_TX(fn, sz, flags)                                                  \
        uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
                              uint16_t nb_events)                              \
        {                                                                      \
                struct cn10k_sso_hws *ws = port;                               \
                uint64_t cmd[sz];                                              \
                RTE_SET_USED(nb_events);                                       \
                return cn10k_sso_hws_event_tx(                                 \
                        ws, &ev[0], cmd, (const uint64_t *)ws->tx_adptr_data,  \
                        flags);                                                \
        }

#define SSO_TX_SEG(fn, sz, flags)                                              \
        uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
                              uint16_t nb_events)                              \
        {                                                                      \
                uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
                struct cn10k_sso_hws *ws = port;                               \
                RTE_SET_USED(nb_events);                                       \
                return cn10k_sso_hws_event_tx(                                 \
                        ws, &ev[0], cmd, (const uint64_t *)ws->tx_adptr_data,  \
                        (flags) | NIX_TX_MULTI_SEG_F);                         \
        }

#endif /* __CN10K_WORKER_H__ */