event/cnxk: add Rx adapter fastpath operations

Add support for event eth Rx adapter fastpath operations: generate the
SSO dequeue handlers from templates so that each combination of Rx
offload flags gets its own specialized function, and select the matching
one at runtime from flag-indexed dispatch tables.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
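
For context on the pattern used throughout this patch: each R() entry in
NIX_RX_FASTPATH_MODES stamps out one dequeue function specialized for a
fixed combination of Rx offload flags, and re-expanding the same list
builds the flag-indexed dispatch tables declared below. A minimal,
self-contained sketch of the idiom follows; every DEMO_*/demo_* name is a
hypothetical stand-in, not a driver symbol, and the real list in
cn9k_rx.h/cn10k_rx.h covers all 64 combinations of the six flags.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_NONE  0            /* hypothetical flag values */
	#define DEMO_RSS_F (1ULL << 0)

	/* Shared inline worker: 'flags' is a compile-time constant in each
	 * instantiation, so the offload branches fold away per function. */
	static inline uint16_t demo_get_work(uint64_t flags)
	{
		return (flags & DEMO_RSS_F) ? 2 : 1;
	}

	/* Two-entry stand-in for NIX_RX_FASTPATH_MODES. */
	#define DEMO_RX_FASTPATH_MODES                                 \
		R(no_offload, 0, 0, 0, 0, 0, 0, DEMO_NONE)             \
		R(rss,        0, 0, 0, 0, 0, 1, DEMO_RSS_F)

	/* Stamp out one specialized function per mode. */
	#define R(name, f5, f4, f3, f2, f1, f0, flags)                 \
		static uint16_t demo_deq_##name(void)                  \
		{                                                      \
			return demo_get_work(flags);                   \
		}
	DEMO_RX_FASTPATH_MODES
	#undef R

	int main(void)
	{
		/* Re-expand the list to build a flag-indexed table. */
		uint16_t (*deq[2])(void) = {
	#define R(name, f5, f4, f3, f2, f1, f0, flags) [f0] = demo_deq_##name,
		DEMO_RX_FASTPATH_MODES
	#undef R
		};

		printf("%u %u\n", deq[0](), deq[1]());
		return 0;
	}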
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_tmo_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_tmo_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_tmo_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_tmo_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
event_dev->enqueue = cn10k_sso_hws_enq;
event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
-
- event_dev->dequeue = cn10k_sso_hws_deq;
- event_dev->dequeue_burst = cn10k_sso_hws_deq_burst;
- if (dev->is_timeout_deq) {
- event_dev->dequeue = cn10k_sso_hws_tmo_deq;
- event_dev->dequeue_burst = cn10k_sso_hws_tmo_deq_burst;
+ if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+ event_dev->dequeue = sso_hws_deq_seg
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_seg_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_tmo_deq_seg
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_tmo_deq_seg_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
+ } else {
+ event_dev->dequeue = sso_hws_deq
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_tmo_deq
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_tmo_deq_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
}
}
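
Note the selection idiom used above and mirrored in the cn9k code below:
'!!' collapses each offload bit into a 0/1 index, so choosing a specialized
handler is one table lookup at setup time rather than per-packet branching.
A condensed, hypothetical form of the same lookup (demo_pick_deq is
illustrative only, not driver code):

	static inline event_dequeue_t
	demo_pick_deq(const event_dequeue_t tbl[2][2][2][2][2][2],
		      uint64_t off)
	{
		return tbl[!!(off & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
			  [!!(off & NIX_RX_OFFLOAD_TSTAMP_F)]
			  [!!(off & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
			  [!!(off & NIX_RX_OFFLOAD_CHECKSUM_F)]
			  [!!(off & NIX_RX_OFFLOAD_PTYPE_F)]
			  [!!(off & NIX_RX_OFFLOAD_RSS_F)];
	}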
return 1;
}
-
-uint16_t __rte_hot
-cn10k_sso_hws_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct cn10k_sso_hws *ws = port;
-
- RTE_SET_USED(timeout_ticks);
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);
- return 1;
- }
-
- return cn10k_sso_hws_get_work(ws, ev);
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn10k_sso_hws_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct cn10k_sso_hws *ws = port;
- uint16_t ret = 1;
- uint64_t iter;
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);
- return ret;
- }
-
- ret = cn10k_sso_hws_get_work(ws, ev);
- for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
- ret = cn10k_sso_hws_get_work(ws, ev);
-
- return ret;
-}
-
-uint16_t __rte_hot
-cn10k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events, uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn10k_sso_hws_tmo_deq(port, ev, timeout_ticks);
-}
cn10k_sso_hws_fwd_group(ws, ev, grp);
}
+static __rte_always_inline void
+cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
+ const uint32_t tag, const uint32_t flags,
+ const void *const lookup_mem)
+{
+ const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
+ (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+
+ cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
+ (struct rte_mbuf *)mbuf, lookup_mem,
+ mbuf_init | ((uint64_t)port_id) << 48, flags);
+}
+
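
The mbuf_init constant above is the mbuf "rearm" word: on the little-endian
layout used here, struct rte_mbuf's 64-bit rearm_data region packs
data_off | refcnt | nb_segs | port, so 0x100010000ULL pre-sets refcnt = 1
and nb_segs = 1, the low 16 bits hold data_off (the headroom, plus 8 bytes
reserved in front of the packet when the timestamp offload is enabled), and
the caller ORs the port into the top 16 bits. A hypothetical helper
(demo_mbuf_init) reconstructing the same word:

	static inline uint64_t
	demo_mbuf_init(uint16_t headroom, int tstamp_ena, uint8_t port_id)
	{
		uint64_t w;

		w = headroom + (tstamp_ena ? 8 : 0); /* bits  0..15: data_off */
		w |= 1ULL << 16;                     /* bits 16..31: refcnt = 1 */
		w |= 1ULL << 32;                     /* bits 32..47: nb_segs = 1 */
		w |= (uint64_t)port_id << 48;        /* bits 48..63: port */
		return w; /* base (port/data_off zero) == 0x100010000ULL */
	}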
static __rte_always_inline uint16_t
-cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
+cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
+ const uint32_t flags, void *lookup_mem)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
+ uint64_t tstamp_ptr;
+ uint64_t mbuf;
gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
asm volatile(
PLT_CPU_FEATURE_PREAMBLE
"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
- : [wdata] "+r"(gw.get_work)
+ "sub %[mbuf], %H[wdata], #0x80 \n"
+ : [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
: "memory");
#else
roc_load_pair(gw.u64[0], gw.u64[1],
ws->base + SSOW_LF_GWS_WQE0);
} while (gw.u64[0] & BIT_ULL(63));
+ mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
+ if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+ if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+ gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+ cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+ gw.u64[0] & 0xFFFFF, flags,
+ lookup_mem);
+ /* Extract tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
+ gw.u64[1]) +
+ CNXK_SSO_WQE_SG_PTR);
+ cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ ws->tstamp,
+ flags & NIX_RX_OFFLOAD_TSTAMP_F,
+ flags & NIX_RX_MULTI_SEG_F,
+ (uint64_t *)tstamp_ptr);
+ gw.u64[1] = mbuf;
+ }
+ }
+
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
__uint128_t get_work;
uint64_t u64[2];
} gw;
+ uint64_t mbuf;
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldp %[tag], %[wqp], [%[tag_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
- : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+ [mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
: "memory");
#else
roc_load_pair(gw.u64[0], gw.u64[1],
ws->base + SSOW_LF_GWS_WQE0);
} while (gw.u64[0] & BIT_ULL(63));
+ mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
+ if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+ if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+ gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+ cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+ gw.u64[0] & 0xFFFFF, 0, NULL);
+ gw.u64[1] = mbuf;
+ }
+ }
+
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
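
With this in place, an RTE_EVENT_TYPE_ETHDEV event arrives at the
application with a fully formed mbuf: the mbuf sits sizeof(struct rte_mbuf)
(128 bytes, hence the "sub ..., #0x80" above) below the WQE and has already
been filled in by cn10k_wqe_to_mbuf(). A hypothetical consumer loop, where
done, dev_id, port_id, timeout and the process_pkt()/handle_other() helpers
are placeholders:

	struct rte_event ev;

	while (!done) {
		if (!rte_event_dequeue_burst(dev_id, port_id, &ev, 1,
					     timeout))
			continue;
		if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			process_pkt(ev.mbuf); /* no extra Rx parsing needed */
		else
			handle_other(&ev);
	}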
const struct rte_event ev[],
uint16_t nb_events);
-uint16_t __rte_hot cn10k_sso_hws_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn10k_sso_hws_tmo_deq_burst(void *port,
- struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
#endif
--- /dev/null
+++ b/drivers/event/cnxk/cn10k_worker_deq.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn10k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn10k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ return 1; \
+ } \
+ \
+ return cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn10k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ return 1; \
+ } \
+ \
+ return cn10k_sso_hws_get_work( \
+ ws, ev, flags | NIX_RX_MULTI_SEG_F, ws->lookup_mem); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn10k_worker_deq_burst.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn10k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn10k_sso_hws_deq_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn10k_sso_hws_deq_seg_##name(port, ev, timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn10k_worker_deq_tmo.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn10k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn10k_sso_hws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ return ret; \
+ } \
+ \
+ ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = cn10k_sso_hws_get_work(ws, ev, flags, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn10k_sso_hws_deq_tmo_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn10k_sso_hws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0); \
+ return ret; \
+ } \
+ \
+ ret = cn10k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = cn10k_sso_hws_get_work(ws, ev, flags, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn10k_sso_hws_deq_tmo_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
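
As the _tmo_ templates show, the driver interprets timeout_ticks as a poll
count: get_work is retried up to timeout_ticks times while no event is
available. A hypothetical application-side sketch (dev_id, port_id, ev and
n are placeholders) that first converts a nanosecond budget into ticks,
assuming the device was configured with
RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT:

	uint64_t ticks = 0;

	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000 /* ns */,
					    &ticks))
		ticks = 0; /* fall back to a non-blocking dequeue */

	n = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);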
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ /* Single WS modes */
+ const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ /* Dual WS modes */
+ const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_dual_deq_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_dual_deq_tmo[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_tmo_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_t sso_hws_dual_deq_tmo_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_tmo_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
event_dev->enqueue = cn9k_sso_hws_enq;
event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
-
- event_dev->dequeue = cn9k_sso_hws_deq;
- event_dev->dequeue_burst = cn9k_sso_hws_deq_burst;
- if (dev->deq_tmo_ns) {
- event_dev->dequeue = cn9k_sso_hws_tmo_deq;
- event_dev->dequeue_burst = cn9k_sso_hws_tmo_deq_burst;
+ if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+ event_dev->dequeue = sso_hws_deq_seg
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_seg_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_deq_tmo_seg
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_tmo_seg_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
+ } else {
+ event_dev->dequeue = sso_hws_deq
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_burst
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_deq_tmo
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_deq_tmo_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ }
}
if (dev->dual_ws) {
event_dev->enqueue_forward_burst =
cn9k_sso_hws_dual_enq_fwd_burst;
- event_dev->dequeue = cn9k_sso_hws_dual_deq;
- event_dev->dequeue_burst = cn9k_sso_hws_dual_deq_burst;
- if (dev->deq_tmo_ns) {
- event_dev->dequeue = cn9k_sso_hws_dual_tmo_deq;
- event_dev->dequeue_burst =
- cn9k_sso_hws_dual_tmo_deq_burst;
+ if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
+ event_dev->dequeue = sso_hws_dual_deq_seg
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_dual_deq_seg_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_dual_deq_tmo_seg
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ sso_hws_dual_deq_tmo_seg_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ }
+ } else {
+ event_dev->dequeue = sso_hws_dual_deq
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst = sso_hws_dual_deq_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)];
+ if (dev->is_timeout_deq) {
+ event_dev->dequeue = sso_hws_dual_deq_tmo
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ event_dev->dequeue_burst =
+ sso_hws_dual_deq_tmo_burst
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offloads &
+ NIX_RX_OFFLOAD_RSS_F)];
+ }
}
}
+
+ rte_mb();
}
static void *
return 1;
}
-uint16_t __rte_hot
-cn9k_sso_hws_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct cn9k_sso_hws *ws = port;
-
- RTE_SET_USED(timeout_ticks);
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->tag_op);
- return 1;
- }
-
- return cn9k_sso_hws_get_work(ws, ev);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
- uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn9k_sso_hws_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_tmo_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct cn9k_sso_hws *ws = port;
- uint16_t ret = 1;
- uint64_t iter;
-
- if (ws->swtag_req) {
- ws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(ws->tag_op);
- return ret;
- }
-
- ret = cn9k_sso_hws_get_work(ws, ev);
- for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
- ret = cn9k_sso_hws_get_work(ws, ev);
-
- return ret;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events, uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn9k_sso_hws_tmo_deq(port, ev, timeout_ticks);
-}
-
/* Dual ws ops. */
uint16_t __rte_hot
return 1;
}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
- struct cn9k_sso_hws_dual *dws = port;
- uint16_t gw;
-
- RTE_SET_USED(timeout_ticks);
- if (dws->swtag_req) {
- dws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(dws->ws_state[!dws->vws].tag_op);
- return 1;
- }
-
- gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
- &dws->ws_state[!dws->vws], ev);
- dws->vws = !dws->vws;
- return gw;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events, uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn9k_sso_hws_dual_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_tmo_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks)
-{
- struct cn9k_sso_hws_dual *dws = port;
- uint16_t ret = 1;
- uint64_t iter;
-
- if (dws->swtag_req) {
- dws->swtag_req = 0;
- cnxk_sso_hws_swtag_wait(dws->ws_state[!dws->vws].tag_op);
- return ret;
- }
-
- ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
- &dws->ws_state[!dws->vws], ev);
- dws->vws = !dws->vws;
- for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {
- ret = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],
- &dws->ws_state[!dws->vws], ev);
- dws->vws = !dws->vws;
- }
-
- return ret;
-}
-
-uint16_t __rte_hot
-cn9k_sso_hws_dual_tmo_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events, uint64_t timeout_ticks)
-{
- RTE_SET_USED(nb_events);
-
- return cn9k_sso_hws_dual_tmo_deq(port, ev, timeout_ticks);
-}
}
}
+static __rte_always_inline void
+cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
+ const uint32_t tag, const uint32_t flags,
+ const void *const lookup_mem)
+{
+ const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
+ (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+
+ cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
+ (struct rte_mbuf *)mbuf, lookup_mem,
+ mbuf_init | ((uint64_t)port_id) << 48, flags);
+}
+
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
struct cn9k_sso_hws_state *ws_pair,
- struct rte_event *ev)
+ struct rte_event *ev, const uint32_t flags,
+ const void *const lookup_mem,
+ struct cnxk_timesync_info *const tstamp)
{
const uint64_t set_gw = BIT_ULL(16) | 1;
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
+ uint64_t tstamp_ptr;
+ uint64_t mbuf;
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+ rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
"rty%=: \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: str %[gw], [%[pong]] \n"
" dmb ld \n"
- : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
+ : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+ [mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
[gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
plt_write64(set_gw, ws_pair->getwrk_op);
+ mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
+ if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+ if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+ gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+ cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+ gw.u64[0] & 0xFFFFF, flags,
+ lookup_mem);
+ /* Extract tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
+ gw.u64[1]) +
+ CNXK_SSO_WQE_SG_PTR);
+ cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
+ flags & NIX_RX_OFFLOAD_TSTAMP_F,
+ flags & NIX_RX_MULTI_SEG_F,
+ (uint64_t *)tstamp_ptr);
+ gw.u64[1] = mbuf;
+ }
+ }
+
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
}
static __rte_always_inline uint16_t
-cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
+cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
+ const uint32_t flags, const void *const lookup_mem)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
+ uint64_t tstamp_ptr;
+ uint64_t mbuf;
plt_write64(BIT_ULL(16) | /* wait for work. */
1, /* Use Mask set 0. */
ws->getwrk_op);
+
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F)
+ rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
- : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ " prfm pldl1keep, [%[mbuf]] \n"
+ : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+ [mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
+ mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
+ if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+ if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+ gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+ cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+ gw.u64[0] & 0xFFFFF, flags,
+ lookup_mem);
+ /* Extract tstamp, if PTP enabled */
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
+ gw.u64[1]) +
+ CNXK_SSO_WQE_SG_PTR);
+ cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
+ ws->tstamp,
+ flags & NIX_RX_OFFLOAD_TSTAMP_F,
+ flags & NIX_RX_MULTI_SEG_F,
+ (uint64_t *)tstamp_ptr);
+ gw.u64[1] = mbuf;
+ }
+ }
+
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
__uint128_t get_work;
uint64_t u64[2];
} gw;
+ uint64_t mbuf;
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
- : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
+ " sub %[mbuf], %[wqp], #0x80 \n"
+ : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
+ [mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
+ mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
+ if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
+ if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
+ uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
+
+ gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
+ cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
+ gw.u64[0] & 0xFFFFF, 0, NULL);
+ gw.u64[1] = mbuf;
+ }
+ }
+
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
const struct rte_event ev[],
uint16_t nb_events);
-uint16_t __rte_hot cn9k_sso_hws_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
-
-uint16_t __rte_hot cn9k_sso_hws_dual_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst(void *port,
- struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq(void *port, struct rte_event *ev,
- uint64_t timeout_ticks);
-uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_burst(void *port,
- struct rte_event ev[],
- uint16_t nb_events,
- uint64_t timeout_ticks);
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks);
+
+NIX_RX_FASTPATH_MODES
+#undef R
#endif
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_deq.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work( \
+ ws, ev, flags | NIX_RX_MULTI_SEG_F, ws->lookup_mem); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_deq_burst.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_seg_##name(port, ev, timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_deq_tmo.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return ret; \
+ } \
+ \
+ ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = cn9k_sso_hws_get_work(ws, ev, flags, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_tmo_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return ret; \
+ } \
+ \
+ ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem); \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
+ ret = cn9k_sso_hws_get_work(ws, ev, flags, \
+ ws->lookup_mem); \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_tmo_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags, dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags, dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
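
The dual-workslot templates keep two GWS contexts in flight: while software
consumes from slot vws, cn9k_sso_hws_dual_get_work() has already posted the
next GETWORK on the pair slot (the "str %[gw], [%[pong]]" above), hiding the
SSO round trip. A condensed, hypothetical view of the ping-pong; all demo_*
types and helpers are illustrative stubs:

	#include <stdint.h>

	struct rte_event;                        /* opaque in this sketch */
	struct demo_ws { uint64_t getwrk_op; };  /* stub workslot state */

	/* Stub: the real helper reads tag/WQE from 'cur' and, crucially,
	 * re-arms 'pair' with a GETWORK before returning. */
	static uint16_t
	demo_pair_get_work(struct demo_ws *cur, struct demo_ws *pair,
			   struct rte_event *ev)
	{
		(void)cur; (void)pair; (void)ev;
		return 1;
	}

	struct demo_dual {
		struct demo_ws ws[2]; /* two hardware workslot contexts */
		uint8_t vws;          /* slot to consume from next */
	};

	static uint16_t
	demo_dual_deq(struct demo_dual *dws, struct rte_event *ev)
	{
		uint16_t n = demo_pair_get_work(&dws->ws[dws->vws],
						&dws->ws[dws->vws ^ 1], ev);

		dws->vws ^= 1; /* ping-pong to the slot already armed */
		return n;
	}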
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_burst.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return ret; \
+ } \
+ \
+ ret = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags, dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \
+ ret = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], \
+ &dws->ws_state[!dws->vws], ev, flags, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ } \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_tmo_##name(port, ev, \
+ timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t ret = 1; \
+ uint64_t iter; \
+ \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return ret; \
+ } \
+ \
+ ret = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags, dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \
+ ret = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], \
+ &dws->ws_state[!dws->vws], ev, flags, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ } \
+ \
+ return ret; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_tmo_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
+#define CNXK_SSO_WQE_SG_PTR (9)
#define CNXK_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x) (((x) >> 38) & SSO_TT_EMPTY)
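
For reference, CNXK_SSO_WQE_SG_PTR is a word offset: assuming struct
nix_wqe_hdr_s is a single 64-bit word (as the pointer arithmetic in the
get_work paths implies), adding 9 to a nix_wqe_hdr_s pointer lands on the
10th u64 of the WQE, where the SG area, and with it the PTP timestamp read
by cnxk_nix_mbuf_to_tstamp(), begins. The same load with explicit byte
offsets (wqe here stands for the gw.u64[1] WQE pointer):

	uint64_t tstamp_ptr = *(const uint64_t *)((const uint8_t *)wqe +
						  CNXK_SSO_WQE_SG_PTR * 8);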
sources = files(
'cn9k_eventdev.c',
'cn9k_worker.c',
+ 'cn9k_worker_deq.c',
+ 'cn9k_worker_deq_burst.c',
+ 'cn9k_worker_deq_tmo.c',
+ 'cn9k_worker_dual_deq.c',
+ 'cn9k_worker_dual_deq_burst.c',
+ 'cn9k_worker_dual_deq_tmo.c',
'cn10k_eventdev.c',
'cn10k_worker.c',
+ 'cn10k_worker_deq.c',
+ 'cn10k_worker_deq_burst.c',
+ 'cn10k_worker_deq_tmo.c',
'cnxk_eventdev.c',
'cnxk_eventdev_adptr.c',
'cnxk_eventdev_selftest.c',