+uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
+ uint16_t nb_events);
+
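+/* Dequeue prototypes, expanded once per Rx fastpath mode by
+ * NIX_RX_FASTPATH_MODES below: plain, timeout (tmo), crypto adapter (ca)
+ * and segmented (seg) variants, each with a burst form.
+ */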
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
+
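+/* Fetch the Tx queue pointer for an mbuf from the per-port table using the
+ * queue index stashed by the Tx adapter.
+ */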
+static __rte_always_inline struct cn10k_eth_txq *
+cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
+ const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
+{
+ return (struct cn10k_eth_txq *)
+ txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
+}
+
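+/* Transmit a vector event whose mbufs may target different port/queue
+ * pairs. Mbufs are handled in groups of four: a group that resolves to a
+ * single Tx queue goes through the vector Tx path, otherwise each mbuf is
+ * submitted on its own LMT line.
+ */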
+static __rte_always_inline void
+cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
+ uint64_t *cmd, uint16_t lmt_id, uintptr_t lmt_addr,
+ uint8_t sched_type, uintptr_t base,
+ const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
+ const uint32_t flags)
+{
+ uint16_t port[4], queue[4];
+ struct cn10k_eth_txq *txq;
+ uint16_t i, j;
+ uintptr_t pa;
+
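+ /* Process the vector in groups of four mbufs. */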
+ for (i = 0; i < nb_mbufs; i += 4) {
+ port[0] = mbufs[i]->port;
+ port[1] = mbufs[i + 1]->port;
+ port[2] = mbufs[i + 2]->port;
+ port[3] = mbufs[i + 3]->port;
+
+ queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+ queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
+ queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
+ queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);
+
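+ /* If the four mbufs do not share a single port/queue pair, fall
+ * back to per-mbuf descriptor setup and submission.
+ */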
+ if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
+ ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
+
+ for (j = 0; j < 4; j++) {
+ uint8_t lnum = 0, loff = 0, shft = 0;
+ struct rte_mbuf *m = mbufs[i + j];
+ uintptr_t laddr;
+ uint16_t segdw;
+ bool sec;
+
+ txq = (struct cn10k_eth_txq *)
+ txq_data[port[j]][queue[j]];
+ cn10k_nix_tx_skeleton(txq, cmd, flags);
+ /* Perform header writes before barrier
+ * for TSO
+ */
+ if (flags & NIX_TX_OFFLOAD_TSO_F)
+ cn10k_nix_xmit_prepare_tso(m, flags);
+
+ cn10k_nix_xmit_prepare(m, cmd, flags,
+ txq->lso_tun_fmt, &sec);
+
+ laddr = lmt_addr;
+ /* Prepare the CPT instruction and get the NIXTX address
+ * when the packet goes through CPT on the same LMT line.
+ */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(m, cmd, &laddr,
+ lmt_addr, &lnum,
+ &loff, &shft,
+ txq->sa_base, flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ segdw = cn10k_nix_prepare_mseg(m,
+ (uint64_t *)laddr, flags);
+ } else {
+ segdw = cn10k_nix_tx_ext_subs(flags) +
+ 2;
+ }
+
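+ /* Pick the I/O address: CPT for security (CPT-bound) traffic,
+ * otherwise NIX, with the descriptor size encoded in the low bits.
+ */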
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ pa = txq->cpt_io_addr | 3 << 4;
+ else
+ pa = txq->io_addr | ((segdw - 1) << 4);
+
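+ /* Ordered events must reach the head of the order list
+ * before the LMT store is issued.
+ */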
+ if (!sched_type)
+ roc_sso_hws_head_wait(base +
+ SSOW_LF_GWS_TAG);
+
+ roc_lmt_submit_steorl(lmt_id, pa);
+ }
+ } else {
+ txq = (struct cn10k_eth_txq *)
+ txq_data[port[0]][queue[0]];
+ cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd,
+ base + SSOW_LF_GWS_TAG,
+ flags | NIX_TX_VWQE_F);
+ }
+ }
+}
+
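+/* Tx adapter enqueue worker: transmit the mbuf (or mbuf vector) carried by
+ * the event and return the number of packets sent.
+ */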
+static __rte_always_inline uint16_t
+cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
+ uint64_t *cmd,
+ const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
+ const uint32_t flags)
+{
+ uint8_t lnum = 0, loff = 0, shft = 0;
+ struct cn10k_eth_txq *txq;
+ uint16_t ref_cnt, segdw;
+ struct rte_mbuf *m;
+ uintptr_t lmt_addr;
+ uintptr_t c_laddr;
+ uint16_t lmt_id;
+ uintptr_t pa;
+ bool sec;
+
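+ /* Derive the LMT line address and ID for this work slot; keep a
+ * copy of the base for the security (CPT) path.
+ */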
+ lmt_addr = ws->lmt_base;
+ ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+ c_laddr = lmt_addr;
+
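+ /* For vector events the first 64-bit word of the event vector is
+ * read as metadata: the low 16 bits carry the mbuf count and, when
+ * bit 31 is set, the upper bits carry a single destination
+ * port/queue shared by the whole vector.
+ */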
+ if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
+ struct rte_mbuf **mbufs = ev->vec->mbufs;
+ uint64_t meta = *(uint64_t *)ev->vec;
+
+ if (meta & BIT(31)) {
+ txq = (struct cn10k_eth_txq *)
+ txq_data[meta >> 32][meta >> 48];
+
+ cn10k_nix_xmit_pkts_vector(
+ txq, mbufs, meta & 0xFFFF, cmd,
+ ws->tx_base + SSOW_LF_GWS_TAG,
+ flags | NIX_TX_VWQE_F);
+ } else {
+ cn10k_sso_vwqe_split_tx(
+ mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
+ ev->sched_type, ws->tx_base, txq_data, flags);
+ }
+ rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
+ return (meta & 0xFFFF);
+ }
+
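+ /* Single mbuf event: build the NIX send descriptor on the LMT line
+ * and submit it.
+ */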
+ m = ev->mbuf;
+ ref_cnt = m->refcnt;
+ txq = cn10k_sso_hws_xtract_meta(m, txq_data);
+ cn10k_nix_tx_skeleton(txq, cmd, flags);
+ /* Perform header writes before barrier for TSO */
+ if (flags & NIX_TX_OFFLOAD_TSO_F)
+ cn10k_nix_xmit_prepare_tso(m, flags);
+
+ cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+
+ /* Prepare the CPT instruction and get the NIXTX address
+ * when the packet goes through CPT on the same LMT line.
+ */
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ cn10k_nix_prep_sec(m, cmd, &lmt_addr, c_laddr, &lnum, &loff,
+ &shft, txq->sa_base, flags);
+
+ /* Move NIX desc to LMT/NIXTX area */
+ cn10k_nix_xmit_mv_lmt_base(lmt_addr, cmd, flags);
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
+ } else {
+ segdw = cn10k_nix_tx_ext_subs(flags) + 2;
+ }
+
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+ pa = txq->cpt_io_addr | 3 << 4;
+ else
+ pa = txq->io_addr | ((segdw - 1) << 4);
+
+ if (!ev->sched_type)
+ roc_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);
+
+ roc_lmt_submit_steorl(lmt_id, pa);
+
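+ /* With fast free disabled, skip the SWTAG flush when the mbuf is
+ * still referenced elsewhere.
+ */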
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ if (ref_cnt > 1)
+ return 1;
+ }
+
+ cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
+ ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
+ return 1;
+}
+
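+/* Tx adapter enqueue prototypes, expanded once per Tx fastpath mode:
+ * single and dual workslot variants, each with a segmented form.
+ */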
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events); \
+ uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events); \
+ uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events); \
+ uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events);