#include <otx2_common.h>
#include "otx2_evdev.h"
+#include "otx2_ethdev_sec_tx.h"
/* SSO Operations */
static __rte_always_inline uint16_t
-otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev)
+otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
+ const uint32_t flags, const void * const lookup_mem)
{
union otx2_sso_event event;
+ uint64_t tstamp_ptr;
uint64_t get_work1;
+ uint64_t mbuf;
otx2_write64(BIT_ULL(16) | /* wait for work. */
1, /* Use Mask set 0. */
ws->getwrk_op);
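+
+	/* Prefetch the ptype lookup table while the GETWORK request completes. */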
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+ rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
asm volatile(
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
- " prfm pldl1keep, [%[wqp]] \n"
+ " prfm pldl1keep, [%[wqp], #8] \n"
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r" (event.get_work0),
- [wqp] "=&r" (get_work1)
+ [wqp] "=&r" (get_work1),
+ [mbuf] "=&r" (mbuf)
: [tag_loc] "r" (ws->tag_op),
[wqp_loc] "r" (ws->wqp_op)
);
get_work1 = otx2_read64(ws->wqp_op);
rte_prefetch0((const void *)get_work1);
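+	/*
+	 * The NIX WQE sits sizeof(struct rte_mbuf) (0x80) bytes past the start
+	 * of its mbuf, matching the "sub %[mbuf], %[wqp], #0x80" in the asm
+	 * path above, so subtracting recovers the mbuf for prefetching.
+	 */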
+ mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
+ rte_prefetch0((const void *)mbuf);
#endif
event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
ws->cur_tt = event.sched_type;
ws->cur_grp = event.queue_id;
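+
+	/*
+	 * For RTE_EVENT_TYPE_ETHDEV work, convert the NIX WQE into an rte_mbuf
+	 * in place and hand back the mbuf (not the raw WQE) in ev->u64; the Rx
+	 * timestamp is also copied across when PTP is enabled.
+	 */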
+ if (event.sched_type != SSO_TT_EMPTY &&
+ event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags, lookup_mem);
+	/* Extracting tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+ flags, (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
ev->event = event.get_work0;
ev->u64 = get_work1;
/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
-otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev)
+otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
+ const uint32_t flags)
{
union otx2_sso_event event;
+ uint64_t tstamp_ptr;
uint64_t get_work1;
+ uint64_t mbuf;
#ifdef RTE_ARCH_ARM64
asm volatile(
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
- " prfm pldl1keep, [%[wqp]] \n"
+ " prfm pldl1keep, [%[wqp], #8] \n"
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r" (event.get_work0),
- [wqp] "=&r" (get_work1)
+ [wqp] "=&r" (get_work1),
+ [mbuf] "=&r" (mbuf)
: [tag_loc] "r" (ws->tag_op),
[wqp_loc] "r" (ws->wqp_op)
);
event.get_work0 = otx2_read64(ws->tag_op);
get_work1 = otx2_read64(ws->wqp_op);
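+	/*
+	 * Non-temporal prefetches: this variant runs only while cleaning up a
+	 * workslot, so (presumably) it avoids displacing hot cachelines.
+	 */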
- rte_prefetch0((const void *)get_work1);
+ rte_prefetch_non_temporal((const void *)get_work1);
+ mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
+ rte_prefetch_non_temporal((const void *)mbuf);
#endif
event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
ws->cur_tt = event.sched_type;
ws->cur_grp = event.queue_id;
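+
+	/*
+	 * Same WQE-to-mbuf conversion as otx2_ssogws_get_work(), but without a
+	 * ptype lookup table (lookup_mem is NULL on this cleanup path).
+	 */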
+ if (event.sched_type != SSO_TT_EMPTY &&
+ event.event_type == RTE_EVENT_TYPE_ETHDEV) {
+ otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
+ (uint32_t) event.get_work0, flags, NULL);
+	/* Extracting tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ + OTX2_SSO_WQE_SG_PTR);
+ otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
+ flags, (uint64_t *)tstamp_ptr);
+ get_work1 = mbuf;
+ }
+
ev->event = event.get_work0;
ev->u64 = get_work1;
#endif
}
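+
+/*
+ * Wait until bit 35 (HEAD) of the tag register is set, i.e. this workslot
+ * has reached the head of its flow; otx2_ssogws_order() uses this to gate
+ * transmits on ordered events.
+ */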
+static __rte_always_inline void
+otx2_ssogws_head_wait(struct otx2_ssogws *ws)
+{
+#ifdef RTE_ARCH_ARM64
+ uint64_t tag;
+
+ asm volatile (
+ " ldr %[tag], [%[tag_op]] \n"
+ " tbnz %[tag], 35, done%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldr %[tag], [%[tag_op]] \n"
+ " tbz %[tag], 35, rty%= \n"
+ "done%=: \n"
+ : [tag] "=&r" (tag)
+ : [tag_op] "r" (ws->tag_op)
+ );
+#else
+ /* Wait for the HEAD to be set */
+ while (!(otx2_read64(ws->tag_op) & BIT_ULL(35)))
+ ;
+#endif
+}
+
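+/*
+ * Establish ordering before a transmit: optionally wait for HEAD (ordered
+ * scheduling), then issue an I/O write barrier so packet writes are visible
+ * before the send command is issued.
+ */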
+static __rte_always_inline void
+otx2_ssogws_order(struct otx2_ssogws *ws, const uint8_t wait_flag)
+{
+ if (wait_flag)
+ otx2_ssogws_head_wait(ws);
+
+ rte_cio_wmb();
+}
+
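+/*
+ * Resolve the ethdev Tx queue for an outgoing mbuf: the port comes from
+ * m->port and the queue index is read back from the mbuf via
+ * rte_event_eth_tx_adapter_txq_get().
+ */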
+static __rte_always_inline const struct otx2_eth_txq *
+otx2_ssogws_xtract_meta(struct rte_mbuf *m)
+{
+ return rte_eth_devices[m->port].data->tx_queues[
+ rte_event_eth_tx_adapter_txq_get(m)];
+}
+
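+/*
+ * Build the NIX send descriptor for one mbuf: copy the per-queue command
+ * template into the local command buffer, then fill in the per-packet send
+ * header fields and offloads via otx2_nix_xmit_prepare().
+ */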
+static __rte_always_inline void
+otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
+ uint64_t *cmd, const uint32_t flags)
+{
+ otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
+ otx2_nix_xmit_prepare(m, cmd, flags);
+}
+
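+/*
+ * Tx adapter enqueue for a single event: resolve the Tx queue from the mbuf,
+ * divert to the inline IPsec path when security offload is requested,
+ * otherwise prepare TSO headers, enforce ordering and submit the send
+ * descriptor (single or multi segment) over the LMT store interface.
+ */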
+static __rte_always_inline uint16_t
+otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
+ uint64_t *cmd, const uint32_t flags)
+{
+ struct rte_mbuf *m = ev[0].mbuf;
+ const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);
+
+ rte_prefetch_non_temporal(txq);
+
+ if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+ (m->ol_flags & PKT_TX_SEC_OFFLOAD))
+ return otx2_sec_event_tx(ws, ev, m, txq, flags);
+
+ /* Perform header writes before barrier for TSO */
+ otx2_nix_xmit_prepare_tso(m, flags);
+ otx2_ssogws_order(ws, !ev->sched_type);
+ otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
+
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ m->ol_flags, segdw, flags);
+ otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
+ } else {
+		/* Pass number of segdw as 4: HDR + EXT + SG + SMEM */
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ m->ol_flags, 4, flags);
+ otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr, flags);
+ }
+
+ return 1;
+}
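+
+/*
+ * Illustrative sketch only (not part of this patch): how a worker-port Tx
+ * adapter enqueue function could wrap otx2_ssogws_event_tx(). The function
+ * name, the command-buffer size and the zero flags value are assumptions
+ * made for the example.
+ */
+static __rte_unused uint16_t
+otx2_ssogws_tx_adptr_enq_example(void *port, struct rte_event ev[],
+				 uint16_t nb_events)
+{
+	uint64_t cmd[16]; /* scratch space for the NIX send descriptor */
+	struct otx2_ssogws *ws = port;
+
+	RTE_SET_USED(nb_events); /* only ev[0] is transmitted per call */
+	return otx2_ssogws_event_tx(ws, ev, cmd, 0 /* no Tx offload flags */);
+}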
+
#endif