/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2021 Marvell.
*/
#include <rte_cryptodev.h>
#include <rte_eventdev.h>
+#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_cryptodev_mbox.h"
return 0;
}
-int
-otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
- int32_t queue_pair_id, const struct rte_event *event)
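+/*
+ * Point the queue pair's CPT_AF_LF_CTL2.sso_pf_func at the given SSO
+ * PF function so crypto completions are posted to the SSO as events;
+ * a value of 0 breaks the link.
+ */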
+static int
+otx2_ca_qp_sso_link(const struct rte_cryptodev *cdev, struct otx2_cpt_qp *qp,
+ uint16_t sso_pf_func)
{
- struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
- struct otx2_cpt_qp *qp;
int ret;
- qp = cdev->data->queue_pairs[queue_pair_id];
-
- qp->ca_enable = 1;
- rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
-
ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, &af_lf_ctl2.u);
+ qp->blkaddr, &af_lf_ctl2.u);
if (ret)
return ret;
- af_lf_ctl2.s.sso_pf_func = otx2_sso_pf_func_get();
+ af_lf_ctl2.s.sso_pf_func = sso_pf_func;
ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, af_lf_ctl2.u);
- if (ret)
- return ret;
+ qp->blkaddr, af_lf_ctl2.u);
+ return ret;
+}
+
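+/*
+ * Enable crypto adapter mode on the queue pair. With a non-NULL event,
+ * the response event is bound at queue pair level (qp_ev_bind); with
+ * NULL, each crypto op is expected to carry its own event metadata.
+ */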
+static void
+otx2_ca_qp_init(struct otx2_cpt_qp *qp, const struct rte_event *event)
+{
+ if (event) {
+ qp->qp_ev_bind = 1;
+ rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
+ } else {
+ qp->qp_ev_bind = 0;
+ }
+ qp->ca_enable = 1;
+}
+
+int
+otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id, const struct rte_event *event)
+{
+ struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
+ uint16_t sso_pf_func = otx2_sso_pf_func_get();
+ struct otx2_cpt_qp *qp;
+ uint8_t qp_id;
+ int ret;
+
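+ /* A queue_pair_id of -1 means every queue pair on the cryptodev. */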
+ if (queue_pair_id == -1) {
+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
+ qp = cdev->data->queue_pairs[qp_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
+ if (ret) {
+ uint8_t qp_tmp;
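+ /* Unwind the links made so far before returning the error. */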
+ for (qp_tmp = 0; qp_tmp < qp_id; qp_tmp++)
+ otx2_ca_qp_del(dev, cdev, qp_tmp);
+ return ret;
+ }
+ otx2_ca_qp_init(qp, event);
+ }
+ } else {
+ qp = cdev->data->queue_pairs[queue_pair_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
+ if (ret)
+ return ret;
+ otx2_ca_qp_init(qp, event);
+ }
sso_evdev->rx_offloads |= NIX_RX_OFFLOAD_SECURITY_F;
sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);
return 0;
}
int
otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
int32_t queue_pair_id)
{
- union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
+ struct otx2_cpt_vf *vf = cdev->data->dev_private;
struct otx2_cpt_qp *qp;
+ uint8_t qp_id;
int ret;
RTE_SET_USED(dev);
- qp = cdev->data->queue_pairs[queue_pair_id];
- qp->ca_enable = 0;
- memset(&qp->ev, 0, sizeof(struct rte_event));
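+ /* A queue_pair_id of -1 unlinks every queue pair on the cryptodev. */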
+ if (queue_pair_id == -1) {
+ for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
+ qp = cdev->data->queue_pairs[qp_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
+ if (ret)
+ return ret;
+ qp->ca_enable = 0;
+ }
+ } else {
+ qp = cdev->data->queue_pairs[queue_pair_id];
+ ret = otx2_ca_qp_sso_link(cdev, qp, 0);
+ if (ret)
+ return ret;
+ qp->ca_enable = 0;
+ }
- ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, &af_lf_ctl2.u);
- if (ret)
- return ret;
-
- af_lf_ctl2.s.sso_pf_func = 0;
- ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
- qp->blkaddr, af_lf_ctl2.u);
-
- return ret;
+ return 0;
}