From 16261be9a3240bc1ce08fc2f5773b334a4337ef5 Mon Sep 17 00:00:00 2001 From: Shijith Thotton Date: Thu, 2 Sep 2021 20:11:54 +0530 Subject: [PATCH] event/cnxk: add cn9k crypto adapter fast path Set crypto adapter enqueue and dequeue operations for CN9K. Signed-off-by: Shijith Thotton Acked-by: Akhil Goyal --- drivers/event/cnxk/cn9k_eventdev.c | 94 +++++++++++++++++++- drivers/event/cnxk/cn9k_worker.c | 22 +++++ drivers/event/cnxk/cn9k_worker.h | 41 ++++++++- drivers/event/cnxk/cn9k_worker_deq_ca.c | 65 ++++++++++++++ drivers/event/cnxk/cn9k_worker_dual_deq_ca.c | 75 ++++++++++++++++ drivers/event/cnxk/meson.build | 2 + 6 files changed, 292 insertions(+), 7 deletions(-) create mode 100644 drivers/event/cnxk/cn9k_worker_deq_ca.c create mode 100644 drivers/event/cnxk/cn9k_worker_dual_deq_ca.c diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c index c73d81c092..59a3dc22a3 100644 --- a/drivers/event/cnxk/cn9k_eventdev.c +++ b/drivers/event/cnxk/cn9k_eventdev.c @@ -358,6 +358,20 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) #undef R }; + const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = { #define R(name, f5, f4, f3, f2, f1, f0, flags) \ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name, @@ -385,7 +399,22 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name, NIX_RX_FASTPATH_MODES #undef R - }; + }; + + const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t + sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; /* Dual WS modes */ const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = { @@ -415,7 +444,22 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name, NIX_RX_FASTPATH_MODES #undef R - }; + }; + + const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t + sso_hws_dual_deq_ca_burst[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = { #define R(name, f5, f4, f3, f2, f1, f0, flags) \ @@ -447,6 +491,21 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) #undef R }; + const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2] = { +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + + const event_dequeue_burst_t + sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2] = { 
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name, + NIX_RX_FASTPATH_MODES +#undef R + }; + /* Tx modes */ const event_tx_adapter_enqueue sso_hws_tx_adptr_enq[2][2][2][2][2][2] = { @@ -494,6 +553,12 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_tmo_seg_burst); } + if (dev->is_ca_internal_port) { + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, + sso_hws_deq_ca_seg); + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_deq_ca_seg_burst); + } } else { CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq); CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, @@ -504,7 +569,14 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, sso_hws_deq_tmo_burst); } + if (dev->is_ca_internal_port) { + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, + sso_hws_deq_ca); + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst, + sso_hws_deq_ca_burst); + } } + event_dev->ca_enqueue = cn9k_sso_hws_ca_enq; if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, @@ -519,6 +591,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst; event_dev->enqueue_forward_burst = cn9k_sso_hws_dual_enq_fwd_burst; + event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq; if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) { CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, @@ -532,6 +605,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) dev, event_dev->dequeue_burst, sso_hws_dual_deq_tmo_seg_burst); } + if (dev->is_ca_internal_port) { + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, + sso_hws_dual_deq_ca_seg); + CN9K_SET_EVDEV_DEQ_OP( + dev, event_dev->dequeue_burst, + sso_hws_dual_deq_ca_seg_burst); + } } else { CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_dual_deq); @@ -544,6 +624,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev) dev, event_dev->dequeue_burst, sso_hws_dual_deq_tmo_burst); } + if (dev->is_ca_internal_port) { + CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, + sso_hws_dual_deq_ca); + CN9K_SET_EVDEV_DEQ_OP( + dev, event_dev->dequeue_burst, + sso_hws_dual_deq_ca_burst); + } } if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) @@ -930,7 +1017,8 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); - *caps = 0; + *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | + RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; return 0; } diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c index 538bc4b0b3..32f7cc0343 100644 --- a/drivers/event/cnxk/cn9k_worker.c +++ b/drivers/event/cnxk/cn9k_worker.c @@ -5,6 +5,7 @@ #include "roc_api.h" #include "cn9k_worker.h" +#include "cn9k_cryptodev_ops.h" uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev) @@ -117,3 +118,24 @@ cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[], return 1; } + +uint16_t __rte_hot +cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events) +{ + struct cn9k_sso_hws *ws = port; + + RTE_SET_USED(nb_events); + + return cn9k_cpt_crypto_adapter_enqueue(ws->tag_op, ev->event_ptr); +} + +uint16_t __rte_hot +cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events) +{ + struct cn9k_sso_hws_dual *dws = port; + + 
RTE_SET_USED(nb_events); + + return cn9k_cpt_crypto_adapter_enqueue(dws->ws_state[!dws->vws].tag_op, + ev->event_ptr); +} diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h index 9b2a0bf882..3e8f214904 100644 --- a/drivers/event/cnxk/cn9k_worker.h +++ b/drivers/event/cnxk/cn9k_worker.h @@ -8,6 +8,7 @@ #include "cnxk_ethdev.h" #include "cnxk_eventdev.h" #include "cnxk_worker.h" +#include "cn9k_cryptodev_ops.h" #include "cn9k_ethdev.h" #include "cn9k_rx.h" @@ -187,8 +188,12 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws, (gw.u64[0] & 0xffffffff); if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) { - if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == - RTE_EVENT_TYPE_ETHDEV) { + if ((flags & CPT_RX_WQE_F) && + (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == + RTE_EVENT_TYPE_CRYPTODEV)) { + gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]); + } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == + RTE_EVENT_TYPE_ETHDEV) { uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]); gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]); @@ -260,8 +265,12 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev, (gw.u64[0] & 0xffffffff); if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) { - if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == - RTE_EVENT_TYPE_ETHDEV) { + if ((flags & CPT_RX_WQE_F) && + (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == + RTE_EVENT_TYPE_CRYPTODEV)) { + gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]); + } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) == + RTE_EVENT_TYPE_ETHDEV) { uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]); gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]); @@ -366,6 +375,10 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port, uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events); +uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], + uint16_t nb_events); +uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], + uint16_t nb_events); #define R(name, f5, f4, f3, f2, f1, f0, flags) \ uint16_t __rte_hot cn9k_sso_hws_deq_##name( \ @@ -378,6 +391,11 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port, uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \ void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \ @@ -386,6 +404,11 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port, uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \ void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); @@ -403,6 +426,11 @@ NIX_RX_FASTPATH_MODES uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t 
timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \ void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \ @@ -411,6 +439,11 @@ NIX_RX_FASTPATH_MODES uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \ void *port, struct rte_event *ev, uint64_t timeout_ticks); \ uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks); \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \ void *port, struct rte_event ev[], uint16_t nb_events, \ uint64_t timeout_ticks); diff --git a/drivers/event/cnxk/cn9k_worker_deq_ca.c b/drivers/event/cnxk/cn9k_worker_deq_ca.c new file mode 100644 index 0000000000..dbdbba17db --- /dev/null +++ b/drivers/event/cnxk/cn9k_worker_deq_ca.c @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2021 Marvell. + */ + +#include "cn9k_worker.h" +#include "cnxk_eventdev.h" +#include "cnxk_worker.h" + +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws *ws = port; \ + \ + RTE_SET_USED(timeout_ticks); \ + \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(ws->tag_op); \ + return 1; \ + } \ + \ + return cn9k_sso_hws_get_work(ws, ev, flags | CPT_RX_WQE_F, \ + ws->lookup_mem); \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + \ + return cn9k_sso_hws_deq_ca_##name(port, ev, timeout_ticks); \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws *ws = port; \ + \ + RTE_SET_USED(timeout_ticks); \ + \ + if (ws->swtag_req) { \ + ws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait(ws->tag_op); \ + return 1; \ + } \ + \ + return cn9k_sso_hws_get_work( \ + ws, ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \ + ws->lookup_mem); \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + \ + return cn9k_sso_hws_deq_ca_seg_##name(port, ev, \ + timeout_ticks); \ + } + +NIX_RX_FASTPATH_MODES +#undef R diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c new file mode 100644 index 0000000000..dc9191fe80 --- /dev/null +++ b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2021 Marvell. 
+ */ + +#include "cn9k_worker.h" +#include "cnxk_eventdev.h" +#include "cnxk_worker.h" + +#define R(name, f5, f4, f3, f2, f1, f0, flags) \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws_dual *dws = port; \ + uint16_t gw; \ + \ + RTE_SET_USED(timeout_ticks); \ + if (dws->swtag_req) { \ + dws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait( \ + dws->ws_state[!dws->vws].tag_op); \ + return 1; \ + } \ + \ + gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws], \ + &dws->ws_state[!dws->vws], ev, \ + flags | CPT_RX_WQE_F, \ + dws->lookup_mem, dws->tstamp); \ + dws->vws = !dws->vws; \ + return gw; \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + \ + return cn9k_sso_hws_dual_deq_ca_##name(port, ev, \ + timeout_ticks); \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \ + void *port, struct rte_event *ev, uint64_t timeout_ticks) \ + { \ + struct cn9k_sso_hws_dual *dws = port; \ + uint16_t gw; \ + \ + RTE_SET_USED(timeout_ticks); \ + if (dws->swtag_req) { \ + dws->swtag_req = 0; \ + cnxk_sso_hws_swtag_wait( \ + dws->ws_state[!dws->vws].tag_op); \ + return 1; \ + } \ + \ + gw = cn9k_sso_hws_dual_get_work( \ + &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \ + ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \ + dws->lookup_mem, dws->tstamp); \ + dws->vws = !dws->vws; \ + return gw; \ + } \ + \ + uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \ + void *port, struct rte_event ev[], uint16_t nb_events, \ + uint64_t timeout_ticks) \ + { \ + RTE_SET_USED(nb_events); \ + \ + return cn9k_sso_hws_dual_deq_ca_seg_##name(port, ev, \ + timeout_ticks); \ + } + +NIX_RX_FASTPATH_MODES +#undef R diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build index 1155e18ba7..ffbc0ce0f4 100644 --- a/drivers/event/cnxk/meson.build +++ b/drivers/event/cnxk/meson.build @@ -13,9 +13,11 @@ sources = files( 'cn9k_worker.c', 'cn9k_worker_deq.c', 'cn9k_worker_deq_burst.c', + 'cn9k_worker_deq_ca.c', 'cn9k_worker_deq_tmo.c', 'cn9k_worker_dual_deq.c', 'cn9k_worker_dual_deq_burst.c', + 'cn9k_worker_dual_deq_ca.c', 'cn9k_worker_dual_deq_tmo.c', 'cn9k_worker_tx_enq.c', 'cn9k_worker_tx_enq_seg.c', -- 2.20.1
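Usage note (not part of the patch): the sketch below shows how an application might drive the OP_FORWARD internal-port path this patch wires up, using only the public rte_event_crypto_adapter API. It is a minimal sketch, not the driver's own test code; the device IDs, adapter ID, and port configuration are hypothetical placeholders, and the queue_pair_add() signature shown is the one current in the DPDK release this patch targeted (it takes a struct rte_event * conf argument).

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

#define CA_ID 0 /* hypothetical adapter instance id */

static int
setup_crypto_adapter(uint8_t evdev_id, uint8_t cdev_id,
		     struct rte_event_port_conf *port_conf)
{
	uint32_t caps = 0;
	int ret;

	/* With this patch, cn9k reports INTERNAL_PORT_OP_FWD and
	 * SESSION_PRIVATE_DATA instead of no capabilities. */
	ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
	if (ret < 0)
		return ret;
	if (!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		return -ENOTSUP; /* would need OP_NEW or a SW service core */

	ret = rte_event_crypto_adapter_create(
		CA_ID, evdev_id, port_conf,
		RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
	if (ret < 0)
		return ret;

	/* -1 binds all of the cryptodev's queue pairs; the conf event is
	 * not needed for the internal-port capability, so pass NULL. */
	return rte_event_crypto_adapter_queue_pair_add(CA_ID, cdev_id, -1,
						       NULL);
}

/* Worker fast path: ev.event_ptr carries a prepared rte_crypto_op whose
 * session holds the response event info (SESSION_PRIVATE_DATA cap). On
 * completion, the deq_ca dequeue variants added above return an
 * RTE_EVENT_TYPE_CRYPTODEV event pointing at the finished op. */
static inline void
worker_submit(uint8_t evdev_id, uint8_t port_id, struct rte_crypto_op *op)
{
	struct rte_event ev = {
		.event_type = RTE_EVENT_TYPE_CRYPTODEV,
		.op = RTE_EVENT_OP_NEW,
		.event_ptr = op,
	};

	/* Retry until the single event is accepted. */
	while (rte_event_crypto_adapter_enqueue(evdev_id, port_id,
						&ev, 1) == 0)
		;
}

Because the capability is an internal port, no adapter service core runs: rte_event_crypto_adapter_enqueue() dispatches to cn9k_sso_hws_ca_enq() (or the dual-workslot variant), which submits the op directly to CPT via cn9k_cpt_crypto_adapter_enqueue(), and completions come back through the dequeue variants compiled with CPT_RX_WQE_F, which translate the CPT result into an RTE_EVENT_TYPE_CRYPTODEV event in cn9k_sso_hws_get_work().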