Add SSO GWS fastpath event device enqueue functions.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
* Copyright(C) 2021 Marvell.
*/
+#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
static void
cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}
+static void
+cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
+{
+	/* Plug the CN10K single-workslot enqueue fastpath handlers into
+	 * the eventdev ops table. Called for both primary and secondary
+	 * processes (secondaries skip resource init but still need the
+	 * function pointers set in their address space).
+	 *
+	 * Note: the original had PLT_SET_USED(event_dev) here, which
+	 * marked the parameter unused while it is dereferenced on the
+	 * very next lines — a leftover from a stub; removed for
+	 * consistency with cn9k_sso_fp_fns_set().
+	 */
+	event_dev->enqueue = cn10k_sso_hws_enq;
+	event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
+	event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
+	event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
+}
+
static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
struct rte_event_dev_info *dev_info)
event_dev->dev_ops = &cn10k_sso_dev_ops;
/* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ cn10k_sso_fp_fns_set(event_dev);
return 0;
+ }
rc = cnxk_sso_init(event_dev);
if (rc < 0)
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+
+/* Single-event enqueue for a CN10K workslot: dispatch on ev->op.
+ *  NEW     -> cn10k_sso_hws_new_event(); may be refused, its 0/1 result
+ *             is returned directly.
+ *  FORWARD -> cn10k_sso_hws_forward_event(); always counted as enqueued.
+ *  RELEASE -> flush the switch tag via cnxk_sso_hws_swtag_flush().
+ * Any other op is rejected. Returns the number of events enqueued (0 or 1).
+ */
+uint16_t __rte_hot
+cn10k_sso_hws_enq(void *port, const struct rte_event *ev)
+{
+	struct cn10k_sso_hws *ws = port;
+
+	switch (ev->op) {
+	case RTE_EVENT_OP_NEW:
+		return cn10k_sso_hws_new_event(ws, ev);
+	case RTE_EVENT_OP_FORWARD:
+		cn10k_sso_hws_forward_event(ws, ev);
+		break;
+	case RTE_EVENT_OP_RELEASE:
+		/* Release needs no event data, only the workslot tag regs. */
+		cnxk_sso_hws_swtag_flush(ws->tag_wqe_op, ws->swtag_flush_op);
+		break;
+	default:
+		/* Unknown op: nothing enqueued. */
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Burst enqueue wrapper: hands only ev[0] to the single-event path.
+ * NOTE(review): nb_events is deliberately ignored — presumably the
+ * eventdev layer guarantees bursts of one for this port config; confirm
+ * against the driver's advertised capabilities.
+ */
+uint16_t __rte_hot
+cn10k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
+			uint16_t nb_events)
+{
+	RTE_SET_USED(nb_events);
+	return cn10k_sso_hws_enq(port, ev);
+}
+
+/* Enqueue a burst of RTE_EVENT_OP_NEW events. The loop stops at the
+ * first event cn10k_sso_hws_new_event() refuses (rc == 0).
+ * NOTE(review): nb_events is returned even when the loop aborts early,
+ * so a partial enqueue is reported as complete — verify this matches
+ * the intended error-reporting convention for this driver.
+ */
+uint16_t __rte_hot
+cn10k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
+			    uint16_t nb_events)
+{
+	struct cn10k_sso_hws *ws = port;
+	uint16_t i, rc = 1;
+
+	for (i = 0; i < nb_events && rc; i++)
+		rc = cn10k_sso_hws_new_event(ws, &ev[i]);
+
+	return nb_events;
+}
+
+/* Forward-burst enqueue: forwards only ev[0] and reports 1.
+ * NOTE(review): nb_events is ignored — assumes forward bursts are
+ * limited to a single event per call; confirm with the eventdev
+ * framework's contract for enqueue_forward_burst.
+ */
+uint16_t __rte_hot
+cn10k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
+			    uint16_t nb_events)
+{
+	struct cn10k_sso_hws *ws = port;
+
+	RTE_SET_USED(nb_events);
+	cn10k_sso_hws_forward_event(ws, ev);
+
+	return 1;
+}
return !!gw.u64[1];
}
+/* CN10K Fastpath functions. */
+uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
+uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+
#endif
* Copyright(C) 2021 Marvell.
*/
+#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
#define CN9K_DUAL_WS_NB_WS 2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}
+/* Plug the CN9K enqueue fastpath handlers into the eventdev ops table.
+ * Defaults to the single-workslot handlers; when the device was created
+ * in dual-workslot mode (dev->dual_ws) every pointer is overridden with
+ * its dual-workslot counterpart.
+ */
+static void
+cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	event_dev->enqueue = cn9k_sso_hws_enq;
+	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
+	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
+	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
+
+	if (dev->dual_ws) {
+		/* Dual workslot mode: replace all handlers wholesale. */
+		event_dev->enqueue = cn9k_sso_hws_dual_enq;
+		event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
+		event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
+		event_dev->enqueue_forward_burst =
+			cn9k_sso_hws_dual_enq_fwd_burst;
+	}
+}
+
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
event_dev->dev_ops = &cn9k_sso_dev_ops;
/* For secondary processes, the primary has done all the work */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ cn9k_sso_fp_fns_set(event_dev);
return 0;
+ }
rc = cnxk_sso_init(event_dev);
if (rc < 0)
#include "roc_api.h"
#include "cn9k_worker.h"
+
+/* Single-event enqueue for a CN9K workslot: dispatch on ev->op.
+ *  NEW     -> cn9k_sso_hws_new_event(); may be refused, its 0/1 result
+ *             is returned directly.
+ *  FORWARD -> cn9k_sso_hws_forward_event(); always counted as enqueued.
+ *  RELEASE -> flush the switch tag via cnxk_sso_hws_swtag_flush().
+ * Any other op is rejected. Returns the number of events enqueued (0 or 1).
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
+{
+	struct cn9k_sso_hws *ws = port;
+
+	switch (ev->op) {
+	case RTE_EVENT_OP_NEW:
+		return cn9k_sso_hws_new_event(ws, ev);
+	case RTE_EVENT_OP_FORWARD:
+		cn9k_sso_hws_forward_event(ws, ev);
+		break;
+	case RTE_EVENT_OP_RELEASE:
+		/* Release needs no event data, only the workslot tag regs. */
+		cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
+		break;
+	default:
+		/* Unknown op: nothing enqueued. */
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Burst enqueue wrapper: hands only ev[0] to the single-event path.
+ * NOTE(review): nb_events is deliberately ignored — presumably the
+ * eventdev layer guarantees bursts of one for this port config; confirm
+ * against the driver's advertised capabilities.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_enq_burst(void *port, const struct rte_event ev[],
+		       uint16_t nb_events)
+{
+	RTE_SET_USED(nb_events);
+	return cn9k_sso_hws_enq(port, ev);
+}
+
+/* Enqueue a burst of RTE_EVENT_OP_NEW events. The loop stops at the
+ * first event cn9k_sso_hws_new_event() refuses (rc == 0).
+ * NOTE(review): nb_events is returned even when the loop aborts early,
+ * so a partial enqueue is reported as complete — verify this matches
+ * the intended error-reporting convention for this driver.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_enq_new_burst(void *port, const struct rte_event ev[],
+			   uint16_t nb_events)
+{
+	struct cn9k_sso_hws *ws = port;
+	uint16_t i, rc = 1;
+
+	for (i = 0; i < nb_events && rc; i++)
+		rc = cn9k_sso_hws_new_event(ws, &ev[i]);
+
+	return nb_events;
+}
+
+/* Forward-burst enqueue: forwards only ev[0] and reports 1.
+ * NOTE(review): nb_events is ignored — assumes forward bursts are
+ * limited to a single event per call; confirm with the eventdev
+ * framework's contract for enqueue_forward_burst.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_enq_fwd_burst(void *port, const struct rte_event ev[],
+			   uint16_t nb_events)
+{
+	struct cn9k_sso_hws *ws = port;
+
+	RTE_SET_USED(nb_events);
+	cn9k_sso_hws_forward_event(ws, ev);
+
+	return 1;
+}
+
+/* Dual ws ops. */
+
+/* Single-event enqueue for a CN9K dual-workslot port. Same op dispatch
+ * as cn9k_sso_hws_enq(), but FORWARD/RELEASE operate on one of the two
+ * workslot states: ws_state[!dws->vws] — presumably the slot the most
+ * recent dequeue completed on, since dws->vws tracks the active slot;
+ * TODO(review) confirm against the dual-ws dequeue path.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
+{
+	struct cn9k_sso_hws_dual *dws = port;
+	struct cn9k_sso_hws_state *vws;
+
+	vws = &dws->ws_state[!dws->vws];
+	switch (ev->op) {
+	case RTE_EVENT_OP_NEW:
+		return cn9k_sso_hws_dual_new_event(dws, ev);
+	case RTE_EVENT_OP_FORWARD:
+		cn9k_sso_hws_dual_forward_event(dws, vws, ev);
+		break;
+	case RTE_EVENT_OP_RELEASE:
+		/* Release needs no event data, only the slot's tag regs. */
+		cnxk_sso_hws_swtag_flush(vws->tag_op, vws->swtag_flush_op);
+		break;
+	default:
+		/* Unknown op: nothing enqueued. */
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Dual-ws burst enqueue wrapper: hands only ev[0] to the single-event
+ * path. NOTE(review): nb_events is deliberately ignored — presumably
+ * the eventdev layer guarantees bursts of one here; confirm.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_dual_enq_burst(void *port, const struct rte_event ev[],
+			    uint16_t nb_events)
+{
+	RTE_SET_USED(nb_events);
+	return cn9k_sso_hws_dual_enq(port, ev);
+}
+
+/* Dual-ws burst of RTE_EVENT_OP_NEW events. The loop stops at the first
+ * event cn9k_sso_hws_dual_new_event() refuses (rc == 0).
+ * NOTE(review): nb_events is returned even on early abort — see the
+ * matching note on cn9k_sso_hws_enq_new_burst().
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_dual_enq_new_burst(void *port, const struct rte_event ev[],
+				uint16_t nb_events)
+{
+	struct cn9k_sso_hws_dual *dws = port;
+	uint16_t i, rc = 1;
+
+	for (i = 0; i < nb_events && rc; i++)
+		rc = cn9k_sso_hws_dual_new_event(dws, &ev[i]);
+
+	return nb_events;
+}
+
+/* Dual-ws forward-burst enqueue: forwards only ev[0] on the alternate
+ * workslot state (ws_state[!dws->vws]) and reports 1.
+ * NOTE(review): nb_events is ignored — assumes single-event forward
+ * bursts; confirm with the eventdev framework's contract.
+ */
+uint16_t __rte_hot
+cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
+				uint16_t nb_events)
+{
+	struct cn9k_sso_hws_dual *dws = port;
+
+	RTE_SET_USED(nb_events);
+	cn9k_sso_hws_dual_forward_event(dws, &dws->ws_state[!dws->vws], ev);
+
+	return 1;
+}
return !!gw.u64[1];
}
+/* CN9K Fastpath functions. */
+uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
+uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+
+uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
+ const struct rte_event *ev);
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t nb_events);
+
#endif