static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
struct rte_event *ev, const uint32_t flags,
- const void *const lookup_mem,
- struct cnxk_timesync_info *const tstamp)
+ struct cn9k_sso_hws_dual *dws)
{
- const uint64_t set_gw = BIT_ULL(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

if (flags & NIX_RX_OFFLOAD_PTYPE_F)
- rte_prefetch_non_temporal(lookup_mem);
+ rte_prefetch_non_temporal(dws->lookup_mem);
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
"rty%=: \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
[mbuf] "=&r"(mbuf)
: [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
- [wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(set_gw),
+ [wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(dws->gw_wdata),
[pong] "r"(pair_base + SSOW_LF_GWS_OP_GET_WORK0));
#else
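	/* Scalar fallback: poll the GWS TAG CSR until the pending
	 * GET_WORK bit (bit 63) clears, then read back the WQE pointer. */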
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
- plt_write64(set_gw, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
+ plt_write64(dws->gw_wdata, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
gw.u64[0] & 0xFFFFF, flags,
- lookup_mem);
+ dws->lookup_mem);
/* Extracting tstamp, if PTP enabled */
tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
gw.u64[1]) +
CNXK_SSO_WQE_SG_PTR);
- cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+ cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ dws->tstamp,
flags & NIX_RX_OFFLOAD_TSTAMP_F,
flags & NIX_RX_MULTI_SEG_F,
(uint64_t *)tstamp_ptr);
uint64_t tstamp_ptr;
uint64_t mbuf;
- plt_write64(BIT_ULL(16) | /* wait for work. */
- 1, /* Use Mask set 0. */
- ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+ plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
if (flags & NIX_RX_OFFLOAD_PTYPE_F)
rte_prefetch_non_temporal(lookup_mem);
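/*
 * A minimal sketch of the setup this change assumes: the GET_WORK
 * doorbell word that the old code rebuilt inline (BIT_ULL(16) | 1) is
 * precomputed once per workslot and reused from ws->gw_wdata /
 * dws->gw_wdata on every dequeue. Helper name is hypothetical.
 */
static inline uint64_t
cn9k_sso_hws_gw_wdata_default(void)
{
	/* BIT_ULL(16): wait for work; bit 0: use mask-set 0 -- exactly
	 * the literal removed from the hot paths above. */
	return BIT_ULL(16) | 1;
}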
SSOW_LF_GWS_TAG); \
return 1; \
} \
- gw = cn9k_sso_hws_dual_get_work( \
- dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
- dws->lookup_mem, dws->tstamp); \
+ gw = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \
+ dws->base[!dws->vws], ev, \
+ flags, dws); \
dws->vws = !dws->vws; \
return gw; \
}
SSOW_LF_GWS_TAG); \
return ret; \
} \
- ret = cn9k_sso_hws_dual_get_work( \
- dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
- dws->lookup_mem, dws->tstamp); \
+ ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \
+ dws->base[!dws->vws], ev, \
+ flags, dws); \
dws->vws = !dws->vws; \
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \
- ret = cn9k_sso_hws_dual_get_work( \
- dws->base[dws->vws], dws->base[!dws->vws], ev, \
- flags, dws->lookup_mem, dws->tstamp); \
+ ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws], \
+ dws->base[!dws->vws], \
+ ev, flags, dws); \
dws->vws = !dws->vws; \
} \
return ret; \
}
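/*
 * Hedged sketch of what one generated dual-workslot dequeue reduces to
 * after expansion (empty/retry handling elided): work is pulled from
 * the current GWS while the paired GWS is re-armed inside
 * cn9k_sso_hws_dual_get_work(), and vws flips so the two workslots
 * ping-pong on successive calls.
 */
static inline uint16_t
cn9k_sso_hws_dual_deq_sketch(struct cn9k_sso_hws_dual *dws,
			     struct rte_event *ev, const uint32_t flags)
{
	uint16_t gw;

	gw = cn9k_sso_hws_dual_get_work(dws->base[dws->vws],
					dws->base[!dws->vws], ev, flags, dws);
	dws->vws = !dws->vws;
	return gw;
}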
-static __rte_always_inline const struct cn9k_eth_txq *
-cn9k_sso_hws_xtract_meta(struct rte_mbuf *m,
- const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
-{
- return (const struct cn9k_eth_txq *)
- txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
-}
-
-static __rte_always_inline void
-cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
- uint64_t *cmd, const uint32_t flags)
+static __rte_always_inline struct cn9k_eth_txq *
+cn9k_sso_hws_xtract_meta(struct rte_mbuf *m, uint64_t *txq_data)
{
- roc_lmt_mov(cmd, txq->cmd, cn9k_nix_tx_ext_subs(flags));
- cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
+	return (struct cn9k_eth_txq *)
+		(txq_data[(txq_data[m->port] >> 48) +
+			  rte_event_eth_tx_adapter_txq_get(m)] &
+		 (BIT_ULL(48) - 1));
}
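/*
 * A sketch of the flat table layout the rewritten xtract_meta()
 * assumes (the setup helper below is hypothetical): txq_data[port]
 * carries a per-port base offset in bits [63:48], and the entry at
 * base + queue carries the Tx queue pointer in bits [47:0]. This is
 * what lets the fixed txq_data[][RTE_MAX_QUEUES_PER_PORT] array be
 * replaced by a single uint64_t table.
 */
static inline void
sso_txq_data_set_sketch(uint64_t *txq_data, uint16_t port, uint16_t queue,
			uint16_t base_off, struct cn9k_eth_txq *txq)
{
	txq_data[port] |= (uint64_t)base_off << 48;
	txq_data[base_off + queue] = (uint64_t)txq & (BIT_ULL(48) - 1);
}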
#if defined(RTE_ARCH_ARM64)
nixtx += BIT_ULL(7);
nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
- roc_lmt_mov((void *)(nixtx + 16), cmd, cn9k_nix_tx_ext_subs(flags));
+ roc_lmt_mov_nv((void *)(nixtx + 16), cmd, cn9k_nix_tx_ext_subs(flags));
/* Load opcode and cptr already prepared at pkt metadata set */
pkt_len -= l2_len;
/* Head wait if needed */
if (base)
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
/* ESN */
outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
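/*
 * The head-wait call sites in this patch now take the GWS base alone.
 * A sketch of the behaviour this assumes: roc_sso_hws_head_wait()
 * derives the TAG address internally and spins until this workslot
 * reaches the head of its tag chain (HEAD is assumed to be bit 35 of
 * SSOW_LF_GWS_TAG).
 */
static inline void
sso_hws_head_wait_sketch(uintptr_t base)
{
	volatile uint64_t *tag_op =
		(volatile uint64_t *)(base + SSOW_LF_GWS_TAG);

	while (!(*tag_op & BIT_ULL(35)))
		;
}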
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
- const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
- const uint32_t flags)
+ uint64_t *txq_data, const uint32_t flags)
{
struct rte_mbuf *m = ev->mbuf;
- const struct cn9k_eth_txq *txq;
uint16_t ref_cnt = m->refcnt;
+ struct cn9k_eth_txq *txq;
/* Perform header writes before barrier for TSO */
cn9k_nix_xmit_prepare_tso(m, flags);
!(flags & NIX_TX_OFFLOAD_SECURITY_F))
rte_io_wmb();
txq = cn9k_sso_hws_xtract_meta(m, txq_data);
- cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);
+ cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
+ cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
uint64_t ol_flags = m->ol_flags;
if (flags & NIX_TX_MULTI_SEG_F) {
const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
+ cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, segdw,
+ flags);
if (!CNXK_TT_FROM_EVENT(ev->event)) {
cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
cn9k_sso_txq_fc_wait(txq);
if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
segdw);
}
} else {
+ cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, 4, flags);
if (!CNXK_TT_FROM_EVENT(ev->event)) {
cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
cn9k_sso_txq_fc_wait(txq);
if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
cn9k_nix_xmit_one(cmd, txq->lmt_addr,
return 1;
}
-#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+#define T(name, sz, flags) \
uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name( \
void *port, struct rte_event ev[], uint16_t nb_events); \
uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name( \
NIX_TX_FASTPATH_MODES
#undef T
+#define SSO_TX(fn, sz, flags) \
+ uint16_t __rte_hot fn(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ uint64_t cmd[sz]; \
+ RTE_SET_USED(nb_events); \
+ return cn9k_sso_hws_event_tx(ws->base, &ev[0], cmd, \
+ (uint64_t *)ws->tx_adptr_data, \
+ flags); \
+ }
+
+#define SSO_TX_SEG(fn, sz, flags) \
+ uint16_t __rte_hot fn(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+ { \
+ uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; \
+ struct cn9k_sso_hws *ws = port; \
+ RTE_SET_USED(nb_events); \
+ return cn9k_sso_hws_event_tx(ws->base, &ev[0], cmd, \
+ (uint64_t *)ws->tx_adptr_data, \
+ (flags) | NIX_TX_MULTI_SEG_F); \
+ }
+
+#define SSO_DUAL_TX(fn, sz, flags) \
+ uint16_t __rte_hot fn(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+ { \
+ struct cn9k_sso_hws_dual *ws = port; \
+ uint64_t cmd[sz]; \
+ RTE_SET_USED(nb_events); \
+ return cn9k_sso_hws_event_tx(ws->base[!ws->vws], &ev[0], cmd, \
+ (uint64_t *)ws->tx_adptr_data, \
+ flags); \
+ }
+
+#define SSO_DUAL_TX_SEG(fn, sz, flags) \
+ uint16_t __rte_hot fn(void *port, struct rte_event ev[], \
+ uint16_t nb_events) \
+ { \
+ uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; \
+ struct cn9k_sso_hws_dual *ws = port; \
+ RTE_SET_USED(nb_events); \
+ return cn9k_sso_hws_event_tx(ws->base[!ws->vws], &ev[0], cmd, \
+ (uint64_t *)ws->tx_adptr_data, \
+ (flags) | NIX_TX_MULTI_SEG_F); \
+ }
+
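+/*
+ * Usage sketch (assumed, mirroring the existing T() declarations): the
+ * per-offload Tx enqueue functions are emitted by pairing these
+ * helpers with NIX_TX_FASTPATH_MODES in the per-mode source files:
+ *
+ *   #define T(name, sz, flags) \
+ *           SSO_TX(cn9k_sso_hws_tx_adptr_enq_##name, sz, flags) \
+ *           SSO_TX_SEG(cn9k_sso_hws_tx_adptr_enq_seg_##name, sz, flags)
+ *   NIX_TX_FASTPATH_MODES
+ *   #undef T
+ */
+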
#endif