SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_hal.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_logs.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_sym.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_sym_reqmgr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_qp.c
include $(RTE_SDK)/mk/rte.lib.mk
'nitrox_hal.c',
'nitrox_logs.c',
'nitrox_sym.c',
+ 'nitrox_sym_reqmgr.c',
+ 'nitrox_qp.c'
)
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "nitrox_qp.h"
+#include "nitrox_hal.h"
+#include "nitrox_logs.h"
+
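+/* Maximum command queue depth; the requested descriptor count is
+ * rounded up to a power of two and must not exceed this limit.
+ */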
+#define MAX_CMD_QLEN 16384
+
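+/* Allocate the request id queue: one entry per command ring slot,
+ * used to find the soft request that owns a completed descriptor.
+ */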
+static int
+nitrox_setup_ridq(struct nitrox_qp *qp, int socket_id)
+{
+ size_t ridq_size = qp->count * sizeof(*qp->ridq);
+
+ qp->ridq = rte_zmalloc_socket("nitrox ridq", ridq_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!qp->ridq) {
+ NITROX_LOG(ERR, "Failed to create rid queue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int
+nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr, const char *dev_name,
+ uint32_t nb_descriptors, uint8_t instr_size, int socket_id)
+{
+ int err;
+ uint32_t count;
+
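+ /* bar_addr and instr_size are accepted but not used yet; only the
+ * software queue state is initialized here.
+ */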
+ RTE_SET_USED(bar_addr);
+ RTE_SET_USED(instr_size);
+ count = rte_align32pow2(nb_descriptors);
+ if (count > MAX_CMD_QLEN) {
+ NITROX_LOG(ERR, "%s: %d descriptors requested, max queue"
+ " length is %d\n",
+ dev_name, count, MAX_CMD_QLEN);
+ return -EINVAL;
+ }
+
+ qp->count = count;
+ qp->head = qp->tail = 0;
+ rte_atomic16_init(&qp->pending_count);
+ err = nitrox_setup_ridq(qp, socket_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+nitrox_release_ridq(struct nitrox_qp *qp)
+{
+ rte_free(qp->ridq);
+}
+
+int
+nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr)
+{
+ RTE_SET_USED(bar_addr);
+ nitrox_release_ridq(qp);
+ return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_QP_H_
+#define _NITROX_QP_H_
+
+#include <stdbool.h>
+
+#include <rte_cryptodev.h>
+#include <rte_io.h>
+
+struct nitrox_softreq;
+
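+/* Request id entry: maps a ring slot back to its in-flight soft request. */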
+struct rid {
+ struct nitrox_softreq *sr;
+};
+
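+/* Software state of one command queue: ring indices, the shadow rid
+ * queue, the soft request pool and per-queue statistics.
+ */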
+struct nitrox_qp {
+ struct rid *ridq;
+ uint32_t count;
+ uint32_t head;
+ uint32_t tail;
+ struct rte_mempool *sr_mp;
+ struct rte_cryptodev_stats stats;
+ uint16_t qno;
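+ /* Requests posted to the queue but not yet dequeued. */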
+ rte_atomic16_t pending_count;
+};
+
+static inline bool
+nitrox_qp_is_empty(struct nitrox_qp *qp)
+{
+ return (rte_atomic16_read(&qp->pending_count) == 0);
+}
+
+int nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr,
+ const char *dev_name, uint32_t nb_descriptors,
+ uint8_t instr_size, int socket_id);
+int nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr);
+
+#endif /* _NITROX_QP_H_ */
#include "nitrox_sym.h"
#include "nitrox_device.h"
+#include "nitrox_qp.h"
+#include "nitrox_sym_reqmgr.h"
#include "nitrox_logs.h"
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
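+/* Size in bytes of one NPS packet input ring instruction. */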
+#define NPS_PKT_IN_INSTR_SIZE 64
struct nitrox_sym_device {
struct rte_cryptodev *cdev;
info->sym.max_nb_sessions = 0;
}
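+
+/* Sum per-queue-pair counters into the device-wide statistics. */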
+static void
+nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+ struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];
+
+ if (!qp)
+ continue;
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+static void
+nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+ struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];
+
+ if (!qp)
+ continue;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
+ uint16_t qp_id);
+
static int
-nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
+nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
{
- RTE_SET_USED(cdev);
- RTE_SET_USED(qp_id);
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+ struct nitrox_qp *qp = NULL;
+ int err;
+
+ NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ if (qp_id >= ndev->nr_queues) {
+ NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ qp_id, ndev->nr_queues);
+ return -EINVAL;
+ }
+
+ if (cdev->data->queue_pairs[qp_id]) {
+ err = nitrox_sym_dev_qp_release(cdev, qp_id);
+ if (err)
+ return err;
+ }
+
+ qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!qp) {
+ NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
+ return -ENOMEM;
+ }
+
+ qp->qno = qp_id;
+ err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
+ qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
+ socket_id);
+ if (unlikely(err))
+ goto qp_setup_err;
+
+ qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
+ socket_id);
+ if (unlikely(!qp->sr_mp)) {
+ err = -ENOMEM;
+ goto req_pool_err;
+ }
+
+ cdev->data->queue_pairs[qp_id] = qp;
+ NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
return 0;
+
+req_pool_err:
+ nitrox_qp_release(qp, ndev->bar_addr);
+qp_setup_err:
+ rte_free(qp);
+ return err;
+}
+
+static int
+nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
+{
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+ struct nitrox_qp *qp;
+ int err;
+
+ NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ if (qp_id >= ndev->nr_queues) {
+ NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ qp_id, ndev->nr_queues);
+ return -EINVAL;
+ }
+
+ qp = cdev->data->queue_pairs[qp_id];
+ if (!qp) {
+ NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
+ return 0;
+ }
+
+ if (!nitrox_qp_is_empty(qp)) {
+ NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
+ return -EAGAIN;
+ }
+
+ cdev->data->queue_pairs[qp_id] = NULL;
+ err = nitrox_qp_release(qp, ndev->bar_addr);
+ nitrox_sym_req_pool_free(qp->sr_mp);
+ rte_free(qp);
+ NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
+ return err;
}
static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
.dev_stop = nitrox_sym_dev_stop,
.dev_close = nitrox_sym_dev_close,
.dev_infos_get = nitrox_sym_dev_info_get,
- .stats_get = NULL,
- .stats_reset = NULL,
- .queue_pair_setup = NULL,
- .queue_pair_release = NULL,
+ .stats_get = nitrox_sym_dev_stats_get,
+ .stats_reset = nitrox_sym_dev_stats_reset,
+ .queue_pair_setup = nitrox_sym_dev_qp_setup,
+ .queue_pair_release = nitrox_sym_dev_qp_release,
.sym_session_get_size = NULL,
.sym_session_configure = NULL,
.sym_session_clear = NULL
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_errno.h>
+
+#include "nitrox_sym_reqmgr.h"
+#include "nitrox_logs.h"
+
+struct nitrox_softreq {
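+ /* IO address of this object, recorded once when the pool is
+ * populated.
+ */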
+ rte_iova_t iova;
+};
+
+static void
+softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
+{
+ memset(sr, 0, sizeof(*sr));
+ sr->iova = iova;
+}
+
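+/* Mempool object constructor: precompute each object's IO address so
+ * the datapath need not translate it per request.
+ */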
+static void
+req_pool_obj_init(__rte_unused struct rte_mempool *mp,
+ __rte_unused void *opaque, void *obj,
+ __rte_unused unsigned int obj_idx)
+{
+ softreq_init(obj, rte_mempool_virt2iova(obj));
+}
+
+struct rte_mempool *
+nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
+ uint16_t qp_id, int socket_id)
+{
+ char softreq_pool_name[RTE_RING_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
+ cdev->data->name, qp_id);
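+ /* Round the object count up to a multiple of the per-lcore cache
+ * size (64) so that cached objects cannot starve the pool.
+ */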
+ mp = rte_mempool_create(softreq_pool_name,
+ RTE_ALIGN_MUL_CEIL(nobjs, 64),
+ sizeof(struct nitrox_softreq),
+ 64, 0, NULL, NULL, req_pool_obj_init, NULL,
+ socket_id, 0);
+ if (unlikely(!mp))
+ NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
+ qp_id, rte_errno);
+
+ return mp;
+}
+
+void
+nitrox_sym_req_pool_free(struct rte_mempool *mp)
+{
+ rte_mempool_free(mp);
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_SYM_REQMGR_H_
+#define _NITROX_SYM_REQMGR_H_
+
+#include <rte_cryptodev.h>
+
+struct rte_mempool *nitrox_sym_req_pool_create(struct rte_cryptodev *cdev,
+ uint32_t nobjs, uint16_t qp_id,
+ int socket_id);
+void nitrox_sym_req_pool_free(struct rte_mempool *mp);
+
+#endif /* _NITROX_SYM_REQMGR_H_ */