#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
+#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
-
- /* report op status to sym->op and then free the ctx memory */
- rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
-dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
+dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
struct dpaa_sec_op_ctx *ctx;
- int retval;
+ int i, retval;
- retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
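+	/* ctx pools are per queue pair now; use the pool of the qp served
+	 * by this lcore so cores do not contend on one per-device pool.
+	 */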
+ retval = rte_mempool_get(
+ ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+ (void **)(&ctx));
if (!ctx || retval) {
DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
return NULL;
* to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
* each packet, memset is costlier than dcbz_64().
*/
- dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
- dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
- dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
- dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
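+	/* One 64 B cacheline holds four 16 B SG entries, hence the stride
+	 * of 4; clear only as many entries as this job needs.
+	 */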
+ for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
+ dcbz_64(&ctx->job.sg[i]);
- ctx->ctx_pool = ses->ctx_pool;
- ctx->vtop_offset = (size_t) ctx
- - rte_mempool_virt2iova(ctx);
+ ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
+ ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
return ctx;
}
if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct qm_sg_entry *sg_out;
uint32_t len;
+ struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
+ ctx->op->sym->m_src : ctx->op->sym->m_dst;
sg_out = &job->sg[0];
hw_sg_to_cpu(sg_out);
len = sg_out->length;
- ctx->op->sym->m_src->pkt_len = len;
- ctx->op->sym->m_src->data_len = len;
+ mbuf->pkt_len = len;
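+		/* Spread the SEC output length across the chain: the
+		 * intermediate segments keep their data_len, the tail
+		 * segment takes whatever remains.
+		 */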
+ while (mbuf->next != NULL) {
+ len -= mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ mbuf->data_len = len;
}
dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
dpaa_sec_op_ending(ctx);
{
return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
- (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC) &&
+ (ses->aead_alg == 0));
}
static inline int is_proto_ipsec(dpaa_sec_session *ses)
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
struct qm_sg_entry *sg_out;
uint32_t len;
+ struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
+ op->sym->m_src : op->sym->m_dst;
sg_out = &job->sg[0];
hw_sg_to_cpu(sg_out);
len = sg_out->length;
- op->sym->m_src->pkt_len = len;
- op->sym->m_src->data_len = len;
+ mbuf->pkt_len = len;
+ while (mbuf->next != NULL) {
+ len -= mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ mbuf->data_len = len;
}
if (!ctx->fd_status) {
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
else
extra_segs = 2;
- if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ if (mbuf->nb_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
if (!ctx)
return NULL;
rte_iova_t start_addr;
uint8_t *old_digest;
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, 4);
if (!ctx)
return NULL;
req_segs = mbuf->nb_segs * 2 + 3;
}
- if (req_segs > MAX_SG_ENTRIES) {
+ if (mbuf->nb_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, req_segs);
if (!ctx)
return NULL;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
ses->iv.offset);
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, 4);
if (!ctx)
return NULL;
if (ses->auth_only_len)
req_segs++;
- if (req_segs > MAX_SG_ENTRIES) {
+ if (mbuf->nb_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, req_segs);
if (!ctx)
return NULL;
else
dst_start_addr = src_start_addr;
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, 7);
if (!ctx)
return NULL;
req_segs = mbuf->nb_segs * 2 + 4;
}
- if (req_segs > MAX_SG_ENTRIES) {
+ if (mbuf->nb_segs > MAX_SG_ENTRIES) {
DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, req_segs);
if (!ctx)
return NULL;
else
dst_start_addr = src_start_addr;
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, 7);
if (!ctx)
return NULL;
struct qm_sg_entry *sg;
phys_addr_t src_start_addr, dst_start_addr;
- ctx = dpaa_sec_alloc_ctx(ses);
+ ctx = dpaa_sec_alloc_ctx(ses, 2);
if (!ctx)
return NULL;
cf = &ctx->job;
return cf;
}
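+/* Build a SEC job for protocol offload (IPsec/PDCP) on scatter-gather
+ * mbufs: sg[0] is an extension entry pointing to the output chain and
+ * sg[1] an extension entry pointing to the input chain.
+ */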
+static inline struct dpaa_sec_job *
+build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint32_t in_len = 0, out_len = 0;
+
+ if (sym->m_dst)
+ mbuf = sym->m_dst;
+ else
+ mbuf = sym->m_src;
+
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
+ if (mbuf->nb_segs > MAX_SG_ENTRIES) {
+ DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses, req_segs);
+ if (!ctx)
+ return NULL;
+ cf = &ctx->job;
+ ctx->op = op;
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+
+ /* 1st seg */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->offset = 0;
+
+ /* Successive segs */
+ while (mbuf->next) {
+ sg->length = mbuf->data_len;
+ out_len += sg->length;
+ mbuf = mbuf->next;
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->offset = 0;
+ }
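+	/* Last output segment: offer all remaining tailroom, since the
+	 * protocol transform (e.g. ESP encapsulation) may grow the frame.
+	 */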
+ sg->length = mbuf->buf_len - mbuf->data_off;
+ out_len += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ out_sg->length = out_len;
+ cpu_to_hw_sg(out_sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_len = mbuf->data_len;
+
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ sg->offset = 0;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ sg->offset = 0;
+ in_len += sg->length;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ in_sg->length = in_len;
+ cpu_to_hw_sg(in_sg);
+
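+	/* The L4 packet type recorded at rx is no longer valid after the
+	 * protocol transform; clear it.
+	 */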
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+
+ return cf;
+}
+
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
struct rte_crypto_op *op;
struct dpaa_sec_job *cf;
dpaa_sec_session *ses;
- uint32_t auth_only_len;
+ uint32_t auth_only_len, index, flags[DPAA_SEC_BURST] = {0};
struct qman_fq *inq[DPAA_SEC_BURST];
while (nb_ops) {
DPAA_SEC_BURST : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
op = *(ops++);
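+			/* Ops dequeued via the atomic eventdev path carry a
+			 * held DQRR index in seqn; release that entry through
+			 * DCA on this enqueue.
+			 */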
+ if (op->sym->m_src->seqn != 0) {
+ index = op->sym->m_src->seqn - 1;
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
+ /* QM_EQCR_DCA_IDXMASK = 0x0f */
+ flags[loop] = ((index & 0x0f) << 8);
+ flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ DPAA_PER_LCORE_DQRR_HELD &=
+ ~(1 << index);
+ }
+ }
+
switch (op->sess_type) {
case RTE_CRYPTO_OP_WITH_SESSION:
ses = (dpaa_sec_session *)
auth_only_len = op->sym->auth.data.length -
op->sym->cipher.data.length;
- if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
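+			/* Take the single-SG builders only when both source
+			 * and destination are contiguous.
+			 */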
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
+ ((op->sym->m_dst == NULL) ||
+ rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
} else if (is_proto_pdcp(ses)) {
goto send_pkts;
}
} else {
- if (is_auth_only(ses)) {
+ if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
+ cf = build_proto_sg(op, ses);
+ } else if (is_auth_only(ses)) {
cf = build_auth_only_sg(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only_sg(op, ses);
loop = 0;
while (loop < frames_to_send) {
loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
- frames_to_send - loop);
+ &flags[loop], frames_to_send - loop);
}
nb_ops -= frames_to_send;
num_tx += frames_to_send;
}
qp = &internals->qps[qp_id];
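+	/* The ctx pool is owned by the qp; free it along with the qp. */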
+ rte_mempool_free(qp->ctx_pool);
qp->internals = NULL;
dev->data->queue_pairs[qp_id] = NULL;
{
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
+	char str[RTE_MEMPOOL_NAMESIZE];
DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
qp = &internals->qps[qp_id];
qp->internals = internals;
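+	/* Create the per-qp ctx pool which replaces the per-device pool
+	 * formerly set up in dpaa_sec_dev_configure() (removed below).
+	 */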
+ snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
+ dev->data->dev_id, qp_id);
+ if (!qp->ctx_pool) {
+ qp->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!qp->ctx_pool) {
+			DPAA_SEC_ERR("%s create failed", str);
+ return -ENOMEM;
+ }
+ } else
+		DPAA_SEC_INFO("ctx pool already created for dev_id: %d, qp: %d",
+			      dev->data->dev_id, qp_id);
dev->data->queue_pairs[qp_id] = qp;
return 0;
DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
- session->ctx_pool = internals->ctx_pool;
rte_spinlock_lock(&internals->lock);
for (i = 0; i < MAX_DPAA_CORES; i++) {
session->inq[i] = dpaa_sec_attach_rxq(internals);
session->dir = DIR_DEC;
} else
goto out;
- session->ctx_pool = internals->ctx_pool;
rte_spinlock_lock(&internals->lock);
for (i = 0; i < MAX_DPAA_CORES; i++) {
session->inq[i] = dpaa_sec_attach_rxq(internals);
session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
- session->ctx_pool = dev_priv->ctx_pool;
rte_spinlock_lock(&dev_priv->lock);
for (i = 0; i < MAX_DPAA_CORES; i++) {
session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
}
static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_config *config __rte_unused)
{
-
- char str[20];
- struct dpaa_sec_dev_private *internals;
-
PMD_INIT_FUNC_TRACE();
- internals = dev->data->dev_private;
- snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
- if (!internals->ctx_pool) {
- internals->ctx_pool = rte_mempool_create((const char *)str,
- CTX_POOL_NUM_BUFS,
- CTX_POOL_BUF_SIZE,
- CTX_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->ctx_pool) {
- DPAA_SEC_ERR("%s create failed\n", str);
- return -ENOMEM;
- }
- } else
- DPAA_SEC_INFO("mempool already created for dev_id : %d",
- dev->data->dev_id);
-
return 0;
}
static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
- struct dpaa_sec_dev_private *internals;
-
PMD_INIT_FUNC_TRACE();
if (dev == NULL)
return -ENOMEM;
- internals = dev->data->dev_private;
- rte_mempool_free(internals->ctx_pool);
- internals->ctx_pool = NULL;
-
return 0;
}
}
}
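+/* DQRR callback used when a qp is bound to an event queue with parallel
+ * scheduling: recover the crypto op from the SEC frame descriptor, fill
+ * the rte_event from the config stashed on the FQ and consume the entry.
+ */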
+static enum qman_cb_dqrr_result
+dpaa_sec_process_parallel_event(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *outq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ fd = &dqrr->fd;
+
+	/* The SG table is embedded in the op ctx:
+	 * sg[0] is for output, sg[1] is for input.
+	 */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ ctx->op->sym->m_src->pkt_len = len;
+ ctx->op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ev->event_ptr = (void *)ctx->op;
+
+ ev->flow_id = outq->ev.flow_id;
+ ev->sub_event_type = outq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = outq->ev.sched_type;
+ ev->queue_id = outq->ev.queue_id;
+ ev->priority = outq->ev.priority;
+ *bufs = (void *)ctx->op;
+
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ return qman_cb_dqrr_consume;
+}
+
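+/* Atomic-scheduling variant: same op recovery, but the DQRR entry is
+ * deferred (held) and its index stored in the mbuf seqn so that the
+ * follow-up enqueue can release it through DCA.
+ */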
+static enum qman_cb_dqrr_result
+dpaa_sec_process_atomic_event(void *event,
+ struct qman_portal *qm __rte_unused,
+ struct qman_fq *outq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ u8 index;
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ fd = &dqrr->fd;
+
+	/* The SG table is embedded in the op ctx:
+	 * sg[0] is for output, sg[1] is for input.
+	 */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ ctx->op->sym->m_src->pkt_len = len;
+ ctx->op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ev->event_ptr = (void *)ctx->op;
+ ev->flow_id = outq->ev.flow_id;
+ ev->sub_event_type = outq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = outq->ev.sched_type;
+ ev->queue_id = outq->ev.queue_id;
+ ev->priority = outq->ev.priority;
+
+ /* Save active dqrr entries */
+ index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
+ DPAA_PER_LCORE_DQRR_SIZE++;
+ DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
+ DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
+ ev->impl_opaque = index + 1;
+ ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
+ *bufs = (void *)ctx->op;
+
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ return qman_cb_dqrr_defer;
+}
+
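+/* Bind the qp's outbound FQ to an eventdev channel, selecting the DQRR
+ * callback matching the requested scheduling type.
+ */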
+int
+dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t ch_id,
+ const struct rte_event *event)
+{
+ struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+ struct qm_mcc_initfq opts = {0};
+
+ int ret;
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+ opts.fqd.dest.channel = ch_id;
+
+ switch (event->sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+		/* Clear FQCTRL_AVOIDBLOCK, which is not needed when
+		 * HOLD_ACTIVE is set.
+		 */
+ opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+ qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
+		return -ENOTSUP;
+ default:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+ qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
+ break;
+ }
+
+ ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (unlikely(ret)) {
+ DPAA_SEC_ERR("unable to init caam source fq!");
+ return ret;
+ }
+
+ memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
+
+ return 0;
+}
+
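+/* Unbind the qp from the event queue: restore the default callbacks and
+ * retire/oos/re-init the FQ for polled operation.
+ */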
+int
+dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id)
+{
+ struct qm_mcc_initfq opts = {0};
+ int ret;
+ struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
+
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+ qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
+ qp->outq.cb.ern = ern_sec_fq_handler;
+ qman_retire_fq(&qp->outq, NULL);
+ qman_oos_fq(&qp->outq);
+ ret = qman_init_fq(&qp->outq, 0, &opts);
+ if (ret)
+		DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
+ qp->outq.cb.dqrr = NULL;
+
+ return ret;
+}
+
static struct rte_cryptodev_ops crypto_ops = {
.dev_configure = dpaa_sec_dev_configure,
.dev_start = dpaa_sec_dev_start,
internals = dev->data->dev_private;
rte_free(dev->security_ctx);
- /* In case close has been called, internals->ctx_pool would be NULL */
- rte_mempool_free(internals->ctx_pool);
rte_free(internals);
DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
int retval;
- snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
- dpaa_dev->id.dev_id);
+ snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
if (cryptodev == NULL)