Set crypto adapter enqueue and dequeue operations for CN9K.

The ca_enqueue callback submits crypto ops to CPT through the SSO work
slot (internal port), and the new _ca_ dequeue variants, compiled with
CPT_RX_WQE_F set, convert RTE_EVENT_TYPE_CRYPTODEV work entries back
into completed crypto ops on the fast path.
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
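
As a usage sketch (standard rte_event_crypto_adapter API of this DPDK
release, not part of this patch; the adapter id, device ids and
port_conf are placeholders assumed to be configured by the
application), an application would probe the capabilities advertised
below and create the adapter in OP_FORWARD mode:

#include <errno.h>
#include <rte_event_crypto_adapter.h>

static int
setup_crypto_adapter(uint8_t id, uint8_t evdev_id, uint8_t cdev_id,
		     struct rte_event_port_conf *port_conf)
{
	uint32_t caps = 0;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
	if (ret)
		return ret;

	/* This patch advertises INTERNAL_PORT_OP_FWD: the PMD enqueues
	 * to CPT itself, so FORWARD mode needs no service core. */
	if (!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		return -ENOTSUP;

	ret = rte_event_crypto_adapter_create(id, evdev_id, port_conf,
					RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
	if (ret)
		return ret;

	/* The event argument may be NULL for PMDs with an internal port. */
	return rte_event_crypto_adapter_queue_pair_add(id, cdev_id, 0, NULL);
}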
#undef R
};
+ const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
+
+ const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
/* Dual WS modes */
const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
};
+
+ const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_ca_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
#undef R
};
+ const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
/* Tx modes */
const event_tx_adapter_enqueue
sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_seg_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca_seg);
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_seg_burst);
+ }
} else {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca);
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_burst);
+ }
}
+ event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
event_dev->enqueue_forward_burst =
cn9k_sso_hws_dual_enq_fwd_burst;
+ event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
dev, event_dev->dequeue_burst,
sso_hws_dual_deq_tmo_seg_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_dual_deq_ca_seg);
+ CN9K_SET_EVDEV_DEQ_OP(
+ dev, event_dev->dequeue_burst,
+ sso_hws_dual_deq_ca_seg_burst);
+ }
} else {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
sso_hws_dual_deq);
dev, event_dev->dequeue_burst,
sso_hws_dual_deq_tmo_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_dual_deq_ca);
+ CN9K_SET_EVDEV_DEQ_OP(
+ dev, event_dev->dequeue_burst,
+ sso_hws_dual_deq_ca_burst);
+ }
}
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
- *caps = 0;
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
return 0;
}
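
The SESSION_PRIVATE_DATA capability means the PMD expects the response
event in the session's private data. A minimal sketch, assuming a
session-based symmetric op (the helper name and resp_queue_id are
illustrative):

#include <string.h>
#include <rte_cryptodev.h>
#include <rte_event_crypto_adapter.h>

static int
set_response_event(struct rte_cryptodev_sym_session *sess,
		   uint8_t resp_queue_id)
{
	union rte_event_crypto_metadata m_data;

	memset(&m_data, 0, sizeof(m_data));
	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m_data.response_info.queue_id = resp_queue_id;
	m_data.response_info.event_type = RTE_EVENT_TYPE_CRYPTODEV;

	/* The PMD reads this event back when completing the op. */
	return rte_cryptodev_sym_session_set_user_data(sess, &m_data,
						       sizeof(m_data));
}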
#include "roc_api.h"
#include "cn9k_worker.h"
+#include "cn9k_cryptodev_ops.h"
uint16_t __rte_hot
cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
return 1;
}
+
+uint16_t __rte_hot
+cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct cn9k_sso_hws *ws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return cn9k_cpt_crypto_adapter_enqueue(ws->tag_op, ev->event_ptr);
+}
+
+uint16_t __rte_hot
+cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct cn9k_sso_hws_dual *dws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return cn9k_cpt_crypto_adapter_enqueue(dws->ws_state[!dws->vws].tag_op,
+ ev->event_ptr);
+}
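
These handlers are reached through the event device's ca_enqueue
fast-path pointer. A caller-side sketch (dev_id, port_id and op are
assumed to be prepared elsewhere; this PMD only consumes
ev->event_ptr):

#include <rte_crypto.h>
#include <rte_eventdev.h>
#include <rte_pause.h>

static inline void
submit_crypto_op(uint8_t dev_id, uint8_t port_id, struct rte_crypto_op *op)
{
	struct rte_event ev = {
		.event_type = RTE_EVENT_TYPE_CRYPTODEV,
		.op = RTE_EVENT_OP_FORWARD,
		.event_ptr = op,
	};

	/* cn9k_sso_hws_ca_enq() ignores nb_events and submits a single
	 * op per call, so enqueue one event at a time and retry while
	 * the CPT queue is busy. */
	while (rte_event_crypto_adapter_enqueue(dev_id, port_id, &ev, 1) != 1)
		rte_pause();
}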
#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+#include "cn9k_cryptodev_ops.h"
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
(gw.u64[0] & 0xffffffff);
if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
- if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
- RTE_EVENT_TYPE_ETHDEV) {
+ if ((flags & CPT_RX_WQE_F) &&
+ (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_CRYPTODEV)) {
+ gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
+ } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
(gw.u64[0] & 0xffffffff);
if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
- if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
- RTE_EVENT_TYPE_ETHDEV) {
+ if ((flags & CPT_RX_WQE_F) &&
+ (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_CRYPTODEV)) {
+ gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
+ } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
const struct rte_event ev[],
uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
+ uint16_t nb_events);
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_hot cn9k_sso_hws_deq_##name( \
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks);
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_deq_ca.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work(ws, ev, flags | CPT_RX_WQE_F, \
+ ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_ca_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work( \
+ ws, ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
+ ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_ca_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
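
NIX_RX_FASTPATH_MODES (from cn9k_rx.h) invokes R() once per combination
of the six Rx offload flags. For a hypothetical entry such as
R(rss, 0, 0, 0, 0, 0, 1, RSS_F), the template above would expand to
roughly the following, and cn9k_sso_fp_fns_set() would pick it out of
sso_hws_deq_ca[0][0][0][0][0][1]:

uint16_t __rte_hot
cn9k_sso_hws_deq_ca_rss(void *port, struct rte_event *ev,
			uint64_t timeout_ticks)
{
	struct cn9k_sso_hws *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		cnxk_sso_hws_swtag_wait(ws->tag_op);
		return 1;
	}

	/* 'RSS_F | CPT_RX_WQE_F' is a compile-time constant, so the
	 * RTE_EVENT_TYPE_CRYPTODEV branch in the get_work path stays
	 * and all unused offload handling folds away. */
	return cn9k_sso_hws_get_work(ws, ev, RSS_F | CPT_RX_WQE_F,
				     ws->lookup_mem);
}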
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws], \
+ &dws->ws_state[!dws->vws], ev, \
+ flags | CPT_RX_WQE_F, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_ca_##name(port, ev, \
+ timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_ca_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
'cn9k_worker.c',
'cn9k_worker_deq.c',
'cn9k_worker_deq_burst.c',
+ 'cn9k_worker_deq_ca.c',
'cn9k_worker_deq_tmo.c',
'cn9k_worker_dual_deq.c',
'cn9k_worker_dual_deq_burst.c',
+ 'cn9k_worker_dual_deq_ca.c',
'cn9k_worker_dual_deq_tmo.c',
'cn9k_worker_tx_enq.c',
'cn9k_worker_tx_enq_seg.c',