uint8_t reserved[ROC_SSO_MEM_SZ] __plt_cache_aligned;
} __plt_cache_aligned;
-static __plt_always_inline void
-roc_sso_hws_head_wait(uintptr_t tag_op)
+static __plt_always_inline uint64_t
+roc_sso_hws_head_wait(uintptr_t base)
{
-#ifdef RTE_ARCH_ARM64
+ uintptr_t tag_op = base + SSOW_LF_GWS_TAG;
uint64_t tag;
+#if defined(__aarch64__)
 asm volatile(PLT_CPU_FEATURE_PREAMBLE
 " ldr %[tag], [%[tag_op]] \n"
 " tbnz %[tag], 35, done%= \n"
 " sevl \n"
 "rty%=: wfe \n"
 " ldr %[tag], [%[tag_op]] \n"
 " tbz %[tag], 35, rty%= \n"
 "done%=: \n"
 : [tag] "=&r"(tag)
 : [tag_op] "r"(tag_op));
#else
- /* Wait for the SWTAG/SWTAG_FULL operation */
- while (!(plt_read64(tag_op) & BIT_ULL(35)))
- ;
+ do {
+ tag = plt_read64(tag_op);
+ } while (!(tag & BIT_ULL(35)));
#endif
+ return tag;
}
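
A minimal sketch (not part of the patch) of how the tag returned by roc_sso_hws_head_wait() can be cached so the HEAD bit is only polled when the cached copy does not already have it set; the helper name is illustrative.

static inline void
example_head_sync(struct cn10k_sso_hws *ws)
{
	/* Bit 35 of the GWS tag word is the HEAD bit; re-poll only when
	 * the cached copy does not already show the flow at head.
	 */
	if (!(ws->gw_rdata & BIT_ULL(35)))
		ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
}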
/* SSO device initialization */
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
- const uint8_t cur_tt =
- CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));
+ const uint8_t cur_tt = CNXK_TT_FROM_TAG(ws->gw_rdata);
 /* CNXK model
 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
 *
 * SSO_TT_ORDERED        norm           norm             untag
 * SSO_TT_ATOMIC         norm           norm             untag
 * SSO_TT_UNTAGGED       norm           norm             NOOP
 */
const uint8_t grp = ev->queue_id;
/* Group hasn't changed, Use SWTAG to forward the event */
- if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
+ if (CNXK_GRP_FROM_TAG(ws->gw_rdata) == grp)
cn10k_sso_hws_fwd_swtag(ws, ev);
else
/*
} while (gw.u64[0] & BIT_ULL(63));
mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
+ ws->gw_rdata = gw.u64[0];
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
RTE_SET_USED(timeout_ticks); \
if (ws->swtag_req) { \
ws->swtag_req = 0; \
- cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ ws->gw_rdata = cnxk_sso_hws_swtag_wait( \
+ ws->base + SSOW_LF_GWS_WQE0); \
return 1; \
} \
return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
uint64_t iter; \
if (ws->swtag_req) { \
ws->swtag_req = 0; \
- cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ ws->gw_rdata = cnxk_sso_hws_swtag_wait( \
+ ws->base + SSOW_LF_GWS_WQE0); \
return ret; \
} \
ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
else
pa = txq->io_addr | ((segdw - 1) << 4);
- if (!sched_type)
- roc_sso_hws_head_wait(ws->base + SSOW_LF_GWS_TAG);
+ if (!CNXK_TAG_IS_HEAD(ws->gw_rdata) && !sched_type)
+ ws->gw_rdata = roc_sso_hws_head_wait(ws->base);
roc_lmt_submit_steorl(lmt_id, pa);
}
/* Head wait if needed */
if (base)
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
/* ESN */
outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
flags);
if (!CNXK_TT_FROM_EVENT(ev->event)) {
cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
cn9k_sso_txq_fc_wait(txq);
if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, 4, flags);
if (!CNXK_TT_FROM_EVENT(ev->event)) {
cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
- roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+ roc_sso_hws_head_wait(base);
cn9k_sso_txq_fc_wait(txq);
if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
cn9k_nix_xmit_one(cmd, txq->lmt_addr,
#define CNXK_CLR_SUB_EVENT(x) (~(0xffu << 20) & x)
#define CNXK_GRP_FROM_TAG(x) (((x) >> 36) & 0x3ff)
#define CNXK_SWTAG_PEND(x) (BIT_ULL(62) & x)
+#define CNXK_TAG_IS_HEAD(x) (BIT_ULL(35) & x)
#define CN9K_SSOW_GET_BASE_ADDR(_GW) ((_GW)-SSOW_LF_GWS_OP_GET_WORK0)
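
A rough decode of the GWS status word as this patch uses it, inferred from the macros above and the shift/mask logic in the get-work path; treat the field positions as an illustration, not an authoritative hardware description.

static inline void
example_decode_gws(uint64_t gw)
{
	uint32_t tag = (uint32_t)gw;            /* tag value, bits [31:0] */
	uint8_t tt = (gw >> 32) & 0x3;          /* tag type, bits [33:32] */
	uint16_t grp = CNXK_GRP_FROM_TAG(gw);   /* group, bits [45:36] */
	int head = !!CNXK_TAG_IS_HEAD(gw);      /* HEAD of flow, bit 35 */
	int swtag = !!CNXK_SWTAG_PEND(gw);      /* SWTAG pending, bit 62 */

	(void)tag; (void)tt; (void)grp; (void)head; (void)swtag;
}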
struct cn10k_sso_hws {
uint64_t base;
+ uint64_t gw_rdata;
/* PTP timestamp */
struct cnxk_timesync_info *tstamp;
void *lookup_mem;
plt_write64(0, flush_op);
}
-static __rte_always_inline void
+static __rte_always_inline uint64_t
cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
{
-#ifdef RTE_ARCH_ARM64
uint64_t swtp;
+#ifdef RTE_ARCH_ARM64
 asm volatile(PLT_CPU_FEATURE_PREAMBLE
 " ldr %[swtb], [%[swtp_loc]] \n"
 " tbz %[swtb], 62, done%= \n"
 " sevl \n"
 "rty%=: wfe \n"
 " ldr %[swtb], [%[swtp_loc]] \n"
 " tbnz %[swtb], 62, rty%= \n"
 "done%=: \n"
 : [swtb] "=&r"(swtp)
 : [swtp_loc] "r"(tag_op));
#else
/* Wait for the SWTAG/SWTAG_FULL operation */
- while (plt_read64(tag_op) & BIT_ULL(62))
- ;
+ do {
+ swtp = plt_read64(tag_op);
+ } while (swtp & BIT_ULL(62));
#endif
+
+ return swtp;
}
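
A usage sketch (illustrative, not part of the patch): the word returned by cnxk_sso_hws_swtag_wait() is the full GWS status, so callers such as the dequeue macros above can cache it in gw_rdata and decode it later without another read of SSOW_LF_GWS_WQE0. The helper name is hypothetical.

static inline uint8_t
example_cached_tt(struct cn10k_sso_hws *ws)
{
	/* Wait for the pending SWTAG to retire and keep the status word. */
	ws->gw_rdata = cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);

	/* The tag type is taken from the cached copy instead of issuing
	 * another read of SSOW_LF_GWS_WQE0.
	 */
	return CNXK_TT_FROM_TAG(ws->gw_rdata);
}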
#endif
lnum++;
}
- if (flags & NIX_TX_VWQE_F)
- roc_sso_hws_head_wait(ws[0]);
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
left -= burst;
tx_pkts += burst;
}
}
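
In the vector Tx path the workslot is passed as a bare uint64_t array; from the struct layout shown earlier, ws[0] appears to alias the base field and ws[1] the gw_rdata copy, which is what the ws[1] checks in these Tx hunks rely on. A hypothetical helper spelling that out:

static inline void
example_vwqe_head_wait(uint64_t *ws)
{
	/* ws[1] caches the GWS status word; wait for HEAD (bit 35) only
	 * when the cached copy does not already have it set.
	 */
	if (!(ws[1] & BIT_ULL(35)))
		ws[1] = roc_sso_hws_head_wait(ws[0]);
}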
- if (flags & NIX_TX_VWQE_F)
- roc_sso_hws_head_wait(ws[0]);
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
left -= burst;
tx_pkts += burst;
if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
wd.data[0] >>= 16;
- if (flags & NIX_TX_VWQE_F)
- roc_sso_hws_head_wait(ws[0]);
+ if ((flags & NIX_TX_VWQE_F) && !(ws[1] & BIT_ULL(35)))
+ ws[1] = roc_sso_hws_head_wait(ws[0]);
left -= burst;