1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2018 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
37 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec_log.h>
41 enum rta_sec_era rta_sec_era;
45 static uint8_t cryptodev_driver_id;
47 static __thread struct rte_crypto_op **dpaa_sec_ops;
48 static __thread int dpaa_sec_op_nb;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
63 /* report op status to sym->op and then free the ctx memory */
64 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
67 static inline struct dpaa_sec_op_ctx *
68 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
70 struct dpaa_sec_op_ctx *ctx;
73 retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
75 DPAA_SEC_DP_WARN("Failed to allocate SEC op context");
79 * Clear SG memory. There are 16 SG entries of 16 bytes each.
80 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
81 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
82 * every packet, and memset() is costlier than dcbz_64().
84 dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
85 dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
86 dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
87 dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
89 ctx->ctx_pool = ses->ctx_pool;
90 ctx->vtop_offset = (size_t) ctx
91 - rte_mempool_virt2iova(ctx);
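/*
 * Editorial note on vtop_offset: it caches (virtual address - IOVA) of
 * this ctx object, so later virtual-to-physical conversions on the hot
 * path can be a single subtraction instead of a memseg lookup. This is
 * a reading of the field's apparent intent, not normative documentation.
 */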
96 static inline rte_iova_t
97 dpaa_mem_vtop(void *vaddr)
99 const struct rte_memseg *ms;
101 ms = rte_mem_virt2memseg(vaddr, NULL);
103 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
108 dpaa_mem_ptov(rte_iova_t paddr)
112 va = (void *)dpaax_iova_table_get_va(paddr);
116 return rte_mem_iova2virt(paddr);
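/*
 * Illustrative sketch (not compiled into the driver): the two helpers
 * above are intended to be inverses over DPAA-visible memory, assuming
 * the buffer lives in an rte_memseg-backed region. A hypothetical
 * sanity check could look like:
 *
 *	void *va = rte_malloc(NULL, 64, 64);
 *	rte_iova_t pa = dpaa_mem_vtop(va);
 *	RTE_ASSERT(dpaa_mem_ptov(pa) == va);
 *	rte_free(va);
 *
 * dpaa_mem_ptov() tries the dpaax IOVA table fast path first and only
 * falls back to the slower rte_mem_iova2virt() lookup on a miss.
 */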
120 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
122 const struct qm_mr_entry *msg)
124 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
125 fq->fqid, msg->ern.rc, msg->ern.seqnum);
128 /* Initialize the queue with the CAAM channel as destination so that
129 * all packets in this queue are dispatched into CAAM
132 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
135 struct qm_mcc_initfq fq_opts;
139 /* Clear FQ options */
140 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
142 flags = QMAN_INITFQ_FLAG_SCHED;
143 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
144 QM_INITFQ_WE_CONTEXTB;
146 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
147 fq_opts.fqd.context_b = fqid_out;
148 fq_opts.fqd.dest.channel = qm_channel_caam;
149 fq_opts.fqd.dest.wq = 0;
151 fq_in->cb.ern = ern_sec_fq_handler;
153 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
155 ret = qman_init_fq(fq_in, flags, &fq_opts);
156 if (unlikely(ret != 0))
157 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
162 /* frames enqueued on in_fq are processed by CAAM, which puts the crypto result into out_fq */
163 static enum qman_cb_dqrr_result
164 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
165 struct qman_fq *fq __always_unused,
166 const struct qm_dqrr_entry *dqrr)
168 const struct qm_fd *fd;
169 struct dpaa_sec_job *job;
170 struct dpaa_sec_op_ctx *ctx;
172 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
173 return qman_cb_dqrr_defer;
175 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
176 return qman_cb_dqrr_consume;
179 /* sg is embedded in an op ctx,
180 * sg[0] is for output
183 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
185 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
186 ctx->fd_status = fd->status;
187 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
188 struct qm_sg_entry *sg_out;
191 sg_out = &job->sg[0];
192 hw_sg_to_cpu(sg_out);
193 len = sg_out->length;
194 ctx->op->sym->m_src->pkt_len = len;
195 ctx->op->sym->m_src->data_len = len;
197 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
198 dpaa_sec_op_ending(ctx);
200 return qman_cb_dqrr_consume;
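/*
 * Convention used by all the build_*() helpers below (a summary of the
 * compound frame layout, restating the code rather than adding to it):
 *
 *	cf->sg[0]   output side of the compound FD
 *	cf->sg[1]   input side of the compound FD (final = 1)
 *	cf->sg[2..] extension entries when input/output are themselves S/G
 *
 * CAAM consumes the input described by sg[1] and writes the result as
 * described by sg[0]; the FD status is copied into ctx->fd_status.
 */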
203 /* CAAM results are put into this queue */
205 dpaa_sec_init_tx(struct qman_fq *fq)
208 struct qm_mcc_initfq opts;
211 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
212 QMAN_FQ_FLAG_DYNAMIC_FQID;
214 ret = qman_create_fq(0, flags, fq);
216 DPAA_SEC_ERR("qman_create_fq failed");
220 memset(&opts, 0, sizeof(opts));
221 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
222 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
224 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
226 fq->cb.dqrr = dqrr_out_fq_cb_rx;
227 fq->cb.ern = ern_sec_fq_handler;
229 ret = qman_init_fq(fq, 0, &opts);
231 DPAA_SEC_ERR("unable to init caam output fq!");
238 static inline int is_cipher_only(dpaa_sec_session *ses)
240 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
241 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
244 static inline int is_auth_only(dpaa_sec_session *ses)
246 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
247 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
250 static inline int is_aead(dpaa_sec_session *ses)
252 return ((ses->cipher_alg == 0) &&
253 (ses->auth_alg == 0) &&
254 (ses->aead_alg != 0));
257 static inline int is_auth_cipher(dpaa_sec_session *ses)
259 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
260 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
261 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
264 static inline int is_proto_ipsec(dpaa_sec_session *ses)
266 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
269 static inline int is_encode(dpaa_sec_session *ses)
271 return ses->dir == DIR_ENC;
274 static inline int is_decode(dpaa_sec_session *ses)
276 return ses->dir == DIR_DEC;
280 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
282 switch (ses->auth_alg) {
283 case RTE_CRYPTO_AUTH_NULL:
285 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
286 OP_PCL_IPSEC_HMAC_NULL : 0;
287 ses->digest_length = 0;
289 case RTE_CRYPTO_AUTH_MD5_HMAC:
291 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
292 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
293 alginfo_a->algmode = OP_ALG_AAI_HMAC;
295 case RTE_CRYPTO_AUTH_SHA1_HMAC:
297 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
298 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
299 alginfo_a->algmode = OP_ALG_AAI_HMAC;
301 case RTE_CRYPTO_AUTH_SHA224_HMAC:
303 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
304 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
305 alginfo_a->algmode = OP_ALG_AAI_HMAC;
307 case RTE_CRYPTO_AUTH_SHA256_HMAC:
309 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
310 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
311 alginfo_a->algmode = OP_ALG_AAI_HMAC;
313 case RTE_CRYPTO_AUTH_SHA384_HMAC:
315 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
316 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
317 alginfo_a->algmode = OP_ALG_AAI_HMAC;
319 case RTE_CRYPTO_AUTH_SHA512_HMAC:
321 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
322 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
323 alginfo_a->algmode = OP_ALG_AAI_HMAC;
326 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
331 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
333 switch (ses->cipher_alg) {
334 case RTE_CRYPTO_CIPHER_NULL:
336 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
337 OP_PCL_IPSEC_NULL : 0;
339 case RTE_CRYPTO_CIPHER_AES_CBC:
341 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
342 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
343 alginfo_c->algmode = OP_ALG_AAI_CBC;
345 case RTE_CRYPTO_CIPHER_3DES_CBC:
347 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
349 alginfo_c->algmode = OP_ALG_AAI_CBC;
351 case RTE_CRYPTO_CIPHER_AES_CTR:
353 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
354 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
355 alginfo_c->algmode = OP_ALG_AAI_CTR;
358 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
363 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
365 switch (ses->aead_alg) {
366 case RTE_CRYPTO_AEAD_AES_GCM:
367 alginfo->algtype = OP_ALG_ALGSEL_AES;
368 alginfo->algmode = OP_ALG_AAI_GCM;
371 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
375 /* prepare ipsec proto command block of the session */
377 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
379 struct alginfo cipherdata = {0}, authdata = {0};
380 struct sec_cdb *cdb = &ses->cdb;
381 int32_t shared_desc_len = 0;
383 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
389 caam_cipher_alg(ses, &cipherdata);
390 if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
391 DPAA_SEC_ERR("unsupported cipher alg");
395 cipherdata.key = (size_t)ses->cipher_key.data;
396 cipherdata.keylen = ses->cipher_key.length;
397 cipherdata.key_enc_flags = 0;
398 cipherdata.key_type = RTA_DATA_IMM;
400 caam_auth_alg(ses, &authdata);
401 if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
402 DPAA_SEC_ERR("unsupported auth alg");
406 authdata.key = (size_t)ses->auth_key.data;
407 authdata.keylen = ses->auth_key.length;
408 authdata.key_enc_flags = 0;
409 authdata.key_type = RTA_DATA_IMM;
411 cdb->sh_desc[0] = cipherdata.keylen;
412 cdb->sh_desc[1] = authdata.keylen;
413 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
415 (unsigned int *)cdb->sh_desc,
416 &cdb->sh_desc[2], 2);
419 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
422 if (cdb->sh_desc[2] & 1)
423 cipherdata.key_type = RTA_DATA_IMM;
425 cipherdata.key = (size_t)dpaa_mem_vtop(
426 (void *)(size_t)cipherdata.key);
427 cipherdata.key_type = RTA_DATA_PTR;
429 if (cdb->sh_desc[2] & (1<<1))
430 authdata.key_type = RTA_DATA_IMM;
432 authdata.key = (size_t)dpaa_mem_vtop(
433 (void *)(size_t)authdata.key);
434 authdata.key_type = RTA_DATA_PTR;
440 if (ses->dir == DIR_ENC) {
441 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
443 true, swap, SHR_SERIAL,
445 (uint8_t *)&ses->ip4_hdr,
446 &cipherdata, &authdata);
447 } else if (ses->dir == DIR_DEC) {
448 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
450 true, swap, SHR_SERIAL,
452 &cipherdata, &authdata);
454 return shared_desc_len;
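/*
 * Reading of the rta_inline_query() usage above (inferred from the call
 * sites, not from normative documentation): sh_desc[0] and sh_desc[1]
 * carry the cipher and auth key lengths on input, and sh_desc[2] comes
 * back as a bitmask:
 *
 *	bit 0 set -> cipher key fits inline in the descriptor (RTA_DATA_IMM)
 *	bit 1 set -> auth key fits inline in the descriptor (RTA_DATA_IMM)
 *
 * A cleared bit means the key must be referenced by physical address
 * (RTA_DATA_PTR via dpaa_mem_vtop()) to keep the shared descriptor
 * within the hardware size limit.
 */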
457 /* prepare command block of the session */
459 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
461 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
462 int32_t shared_desc_len = 0;
463 struct sec_cdb *cdb = &ses->cdb;
465 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
471 memset(cdb, 0, sizeof(struct sec_cdb));
473 if (is_proto_ipsec(ses)) {
474 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
475 } else if (is_cipher_only(ses)) {
476 caam_cipher_alg(ses, &alginfo_c);
477 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
478 DPAA_SEC_ERR("unsupported cipher alg");
482 alginfo_c.key = (size_t)ses->cipher_key.data;
483 alginfo_c.keylen = ses->cipher_key.length;
484 alginfo_c.key_enc_flags = 0;
485 alginfo_c.key_type = RTA_DATA_IMM;
487 shared_desc_len = cnstr_shdsc_blkcipher(
493 } else if (is_auth_only(ses)) {
494 caam_auth_alg(ses, &alginfo_a);
495 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
496 DPAA_SEC_ERR("unsupported auth alg");
500 alginfo_a.key = (size_t)ses->auth_key.data;
501 alginfo_a.keylen = ses->auth_key.length;
502 alginfo_a.key_enc_flags = 0;
503 alginfo_a.key_type = RTA_DATA_IMM;
505 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
509 } else if (is_aead(ses)) {
510 caam_aead_alg(ses, &alginfo);
511 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
512 DPAA_SEC_ERR("unsupported AEAD alg");
515 alginfo.key = (size_t)ses->aead_key.data;
516 alginfo.keylen = ses->aead_key.length;
517 alginfo.key_enc_flags = 0;
518 alginfo.key_type = RTA_DATA_IMM;
520 if (ses->dir == DIR_ENC)
521 shared_desc_len = cnstr_shdsc_gcm_encap(
522 cdb->sh_desc, true, swap,
527 shared_desc_len = cnstr_shdsc_gcm_decap(
528 cdb->sh_desc, true, swap,
533 caam_cipher_alg(ses, &alginfo_c);
534 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
535 DPAA_SEC_ERR("unsupported cipher alg");
539 alginfo_c.key = (size_t)ses->cipher_key.data;
540 alginfo_c.keylen = ses->cipher_key.length;
541 alginfo_c.key_enc_flags = 0;
542 alginfo_c.key_type = RTA_DATA_IMM;
544 caam_auth_alg(ses, &alginfo_a);
545 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
546 DPAA_SEC_ERR("unsupported auth alg");
550 alginfo_a.key = (size_t)ses->auth_key.data;
551 alginfo_a.keylen = ses->auth_key.length;
552 alginfo_a.key_enc_flags = 0;
553 alginfo_a.key_type = RTA_DATA_IMM;
555 cdb->sh_desc[0] = alginfo_c.keylen;
556 cdb->sh_desc[1] = alginfo_a.keylen;
557 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
559 (unsigned int *)cdb->sh_desc,
560 &cdb->sh_desc[2], 2);
563 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
566 if (cdb->sh_desc[2] & 1)
567 alginfo_c.key_type = RTA_DATA_IMM;
569 alginfo_c.key = (size_t)dpaa_mem_vtop(
570 (void *)(size_t)alginfo_c.key);
571 alginfo_c.key_type = RTA_DATA_PTR;
573 if (cdb->sh_desc[2] & (1<<1))
574 alginfo_a.key_type = RTA_DATA_IMM;
576 alginfo_a.key = (size_t)dpaa_mem_vtop(
577 (void *)(size_t)alginfo_a.key);
578 alginfo_a.key_type = RTA_DATA_PTR;
583 /* auth_only_len is set to 0 here; it will be
584 * overwritten in the FD for each packet.
586 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
587 true, swap, &alginfo_c, &alginfo_a,
589 ses->digest_length, ses->dir);
592 if (shared_desc_len < 0) {
593 DPAA_SEC_ERR("error in preparing command block");
594 return shared_desc_len;
597 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
598 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
599 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
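/*
 * Minimal orientation sketch (hypothetical values, not driver code):
 * a cipher-only session reaches cnstr_shdsc_blkcipher() above roughly
 * like this:
 *
 *	dpaa_sec_session ses = { 0 };
 *	ses.cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
 *	ses.cipher_key.data = key_bytes;	// caller-owned 16-byte key
 *	ses.cipher_key.length = 16;
 *	ses.iv.length = 16;
 *	ses.dir = DIR_ENC;
 *	if (dpaa_sec_prep_cdb(&ses) < 0)
 *		return -1;	// descriptor could not be built
 */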
604 /* qp is lockless, should be accessed by only one thread */
606 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
609 unsigned int pkts = 0;
610 int num_rx_bufs, ret;
611 struct qm_dqrr_entry *dq;
612 uint32_t vdqcr_flags = 0;
616 * For requests of fewer than four buffers, we set QM_VDQCR_EXACT and
617 * provide the exact number of buffers; otherwise we do not set it.
618 * Without the QM_VDQCR_EXACT flag the dequeue can return up to two
619 * more buffers than requested, so we request two fewer in that case.
622 vdqcr_flags = QM_VDQCR_EXACT;
623 num_rx_bufs = nb_ops;
625 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
626 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
628 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
633 const struct qm_fd *fd;
634 struct dpaa_sec_job *job;
635 struct dpaa_sec_op_ctx *ctx;
636 struct rte_crypto_op *op;
638 dq = qman_dequeue(fq);
643 /* sg is embedded in an op ctx,
644 * sg[0] is for output
647 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
649 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
650 ctx->fd_status = fd->status;
652 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
653 struct qm_sg_entry *sg_out;
656 sg_out = &job->sg[0];
657 hw_sg_to_cpu(sg_out);
658 len = sg_out->length;
659 op->sym->m_src->pkt_len = len;
660 op->sym->m_src->data_len = len;
662 if (!ctx->fd_status) {
663 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
665 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
666 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
670 /* report op status to sym->op and then free the ctx memory */
671 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
673 qman_dqrr_consume(fq, dq);
674 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
679 static inline struct dpaa_sec_job *
680 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
682 struct rte_crypto_sym_op *sym = op->sym;
683 struct rte_mbuf *mbuf = sym->m_src;
684 struct dpaa_sec_job *cf;
685 struct dpaa_sec_op_ctx *ctx;
686 struct qm_sg_entry *sg, *out_sg, *in_sg;
687 phys_addr_t start_addr;
688 uint8_t *old_digest, extra_segs;
695 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
696 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
700 ctx = dpaa_sec_alloc_ctx(ses);
706 old_digest = ctx->digest;
710 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
711 out_sg->length = ses->digest_length;
712 cpu_to_hw_sg(out_sg);
716 /* need to extend the input to a compound frame */
717 in_sg->extension = 1;
719 in_sg->length = sym->auth.data.length;
720 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
724 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
725 sg->length = mbuf->data_len - sym->auth.data.offset;
726 sg->offset = sym->auth.data.offset;
728 /* Successive segs */
733 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
734 sg->length = mbuf->data_len;
738 if (is_decode(ses)) {
739 /* Digest verification case */
742 rte_memcpy(old_digest, sym->auth.digest.data,
744 start_addr = dpaa_mem_vtop(old_digest);
745 qm_sg_entry_set64(sg, start_addr);
746 sg->length = ses->digest_length;
747 in_sg->length += ses->digest_length;
749 /* Digest calculation case */
750 sg->length -= ses->digest_length;
761 * |<----data_len------->|
762 * |ip_header|ah_header|icv|payload|
767 static inline struct dpaa_sec_job *
768 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
770 struct rte_crypto_sym_op *sym = op->sym;
771 struct rte_mbuf *mbuf = sym->m_src;
772 struct dpaa_sec_job *cf;
773 struct dpaa_sec_op_ctx *ctx;
774 struct qm_sg_entry *sg;
775 rte_iova_t start_addr;
778 ctx = dpaa_sec_alloc_ctx(ses);
784 old_digest = ctx->digest;
786 start_addr = rte_pktmbuf_iova(mbuf);
789 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
790 sg->length = ses->digest_length;
795 if (is_decode(ses)) {
796 /* need to extend the input to a compound frame */
798 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
799 sg->length = sym->auth.data.length + ses->digest_length;
804 /* digest verification: save the received digest first */
805 rte_memcpy(old_digest, sym->auth.digest.data,
807 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
808 sg->length = sym->auth.data.length;
811 /* let hardware verify the digest */
812 start_addr = dpaa_mem_vtop(old_digest);
814 qm_sg_entry_set64(sg, start_addr);
815 sg->length = ses->digest_length;
819 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
820 sg->length = sym->auth.data.length;
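/*
 * Decode-side note: for verification the received digest is first
 * copied into ctx->digest and chained at the end of the input S/G;
 * CAAM recomputes the hash over the data and compares it with that
 * trailing ICV, reporting a mismatch through the FD status word (see
 * the ctx->fd_status handling above).
 */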
828 static inline struct dpaa_sec_job *
829 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
831 struct rte_crypto_sym_op *sym = op->sym;
832 struct dpaa_sec_job *cf;
833 struct dpaa_sec_op_ctx *ctx;
834 struct qm_sg_entry *sg, *out_sg, *in_sg;
835 struct rte_mbuf *mbuf;
837 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
842 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
845 req_segs = mbuf->nb_segs * 2 + 3;
848 if (req_segs > MAX_SG_ENTRIES) {
849 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
854 ctx = dpaa_sec_alloc_ctx(ses);
863 out_sg->extension = 1;
864 out_sg->length = sym->cipher.data.length;
865 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
866 cpu_to_hw_sg(out_sg);
870 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
871 sg->length = mbuf->data_len - sym->cipher.data.offset;
872 sg->offset = sym->cipher.data.offset;
874 /* Successive segs */
879 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
880 sg->length = mbuf->data_len;
889 in_sg->extension = 1;
891 in_sg->length = sym->cipher.data.length + ses->iv.length;
894 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
898 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
899 sg->length = ses->iv.length;
904 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
905 sg->length = mbuf->data_len - sym->cipher.data.offset;
906 sg->offset = sym->cipher.data.offset;
908 /* Successive segs */
913 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
914 sg->length = mbuf->data_len;
923 static inline struct dpaa_sec_job *
924 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
926 struct rte_crypto_sym_op *sym = op->sym;
927 struct dpaa_sec_job *cf;
928 struct dpaa_sec_op_ctx *ctx;
929 struct qm_sg_entry *sg;
930 rte_iova_t src_start_addr, dst_start_addr;
931 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
934 ctx = dpaa_sec_alloc_ctx(ses);
941 src_start_addr = rte_pktmbuf_iova(sym->m_src);
944 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
946 dst_start_addr = src_start_addr;
950 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
951 sg->length = sym->cipher.data.length + ses->iv.length;
957 /* need to extend the input to a compound frame */
960 sg->length = sym->cipher.data.length + ses->iv.length;
961 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
965 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
966 sg->length = ses->iv.length;
970 qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
971 sg->length = sym->cipher.data.length;
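/*
 * When sym->m_dst is NULL the operation is in-place, so dst_start_addr
 * falls back to src_start_addr; the GCM and cipher-auth builders below
 * follow the same convention.
 */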
978 static inline struct dpaa_sec_job *
979 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
981 struct rte_crypto_sym_op *sym = op->sym;
982 struct dpaa_sec_job *cf;
983 struct dpaa_sec_op_ctx *ctx;
984 struct qm_sg_entry *sg, *out_sg, *in_sg;
985 struct rte_mbuf *mbuf;
987 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
992 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
995 req_segs = mbuf->nb_segs * 2 + 4;
998 if (ses->auth_only_len)
1001 if (req_segs > MAX_SG_ENTRIES) {
1002 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1007 ctx = dpaa_sec_alloc_ctx(ses);
1014 rte_prefetch0(cf->sg);
1017 out_sg = &cf->sg[0];
1018 out_sg->extension = 1;
1020 out_sg->length = sym->aead.data.length + ses->auth_only_len
1021 + ses->digest_length;
1023 out_sg->length = sym->aead.data.length + ses->auth_only_len;
1025 /* output sg entries */
1027 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1028 cpu_to_hw_sg(out_sg);
1031 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1032 sg->length = mbuf->data_len - sym->aead.data.offset +
1034 sg->offset = sym->aead.data.offset - ses->auth_only_len;
1036 /* Successive segs */
1041 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1042 sg->length = mbuf->data_len;
1045 sg->length -= ses->digest_length;
1047 if (is_encode(ses)) {
1049 /* set auth output */
1051 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1052 sg->length = ses->digest_length;
1060 in_sg->extension = 1;
1063 in_sg->length = ses->iv.length + sym->aead.data.length
1064 + ses->auth_only_len;
1066 in_sg->length = ses->iv.length + sym->aead.data.length
1067 + ses->auth_only_len + ses->digest_length;
1069 /* input sg entries */
1071 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1072 cpu_to_hw_sg(in_sg);
1075 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1076 sg->length = ses->iv.length;
1079 /* 2nd seg auth only */
1080 if (ses->auth_only_len) {
1082 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1083 sg->length = ses->auth_only_len;
1089 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1090 sg->length = mbuf->data_len - sym->aead.data.offset;
1091 sg->offset = sym->aead.data.offset;
1093 /* Successive segs */
1098 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1099 sg->length = mbuf->data_len;
1103 if (is_decode(ses)) {
1106 memcpy(ctx->digest, sym->aead.digest.data,
1107 ses->digest_length);
1108 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1109 sg->length = ses->digest_length;
1117 static inline struct dpaa_sec_job *
1118 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1120 struct rte_crypto_sym_op *sym = op->sym;
1121 struct dpaa_sec_job *cf;
1122 struct dpaa_sec_op_ctx *ctx;
1123 struct qm_sg_entry *sg;
1124 uint32_t length = 0;
1125 rte_iova_t src_start_addr, dst_start_addr;
1126 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1129 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1132 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1134 dst_start_addr = src_start_addr;
1136 ctx = dpaa_sec_alloc_ctx(ses);
1144 rte_prefetch0(cf->sg);
1146 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1147 if (is_encode(ses)) {
1148 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1149 sg->length = ses->iv.length;
1150 length += sg->length;
1154 if (ses->auth_only_len) {
1155 qm_sg_entry_set64(sg,
1156 dpaa_mem_vtop(sym->aead.aad.data));
1157 sg->length = ses->auth_only_len;
1158 length += sg->length;
1162 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1163 sg->length = sym->aead.data.length;
1164 length += sg->length;
1168 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1169 sg->length = ses->iv.length;
1170 length += sg->length;
1174 if (ses->auth_only_len) {
1175 qm_sg_entry_set64(sg,
1176 dpaa_mem_vtop(sym->aead.aad.data));
1177 sg->length = ses->auth_only_len;
1178 length += sg->length;
1182 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1183 sg->length = sym->aead.data.length;
1184 length += sg->length;
1187 memcpy(ctx->digest, sym->aead.digest.data,
1188 ses->digest_length);
1191 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1192 sg->length = ses->digest_length;
1193 length += sg->length;
1197 /* input compound frame */
1198 cf->sg[1].length = length;
1199 cf->sg[1].extension = 1;
1200 cf->sg[1].final = 1;
1201 cpu_to_hw_sg(&cf->sg[1]);
1205 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1206 qm_sg_entry_set64(sg,
1207 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1208 sg->length = sym->aead.data.length + ses->auth_only_len;
1209 length = sg->length;
1210 if (is_encode(ses)) {
1212 /* set auth output */
1214 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1215 sg->length = ses->digest_length;
1216 length += sg->length;
1221 /* output compound frame */
1222 cf->sg[0].length = length;
1223 cf->sg[0].extension = 1;
1224 cpu_to_hw_sg(&cf->sg[0]);
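/*
 * Summary of the GCM compound frame assembled above (restating the
 * code, not adding behaviour):
 *
 *	encode: in  = IV | AAD | plaintext
 *	        out = ciphertext (placed AAD-aligned) | ICV
 *	decode: in  = IV | AAD | ciphertext | received ICV
 *	        out = plaintext
 *
 * The output entry starts auth_only_len bytes before aead.data.offset,
 * and fd->cmd later carries auth_only_len (see dpaa_sec_enqueue_burst)
 * so the descriptor can recover the AAD length per packet.
 */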
1229 static inline struct dpaa_sec_job *
1230 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1232 struct rte_crypto_sym_op *sym = op->sym;
1233 struct dpaa_sec_job *cf;
1234 struct dpaa_sec_op_ctx *ctx;
1235 struct qm_sg_entry *sg, *out_sg, *in_sg;
1236 struct rte_mbuf *mbuf;
1238 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1243 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1246 req_segs = mbuf->nb_segs * 2 + 4;
1249 if (req_segs > MAX_SG_ENTRIES) {
1250 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1255 ctx = dpaa_sec_alloc_ctx(ses);
1262 rte_prefetch0(cf->sg);
1265 out_sg = &cf->sg[0];
1266 out_sg->extension = 1;
1268 out_sg->length = sym->auth.data.length + ses->digest_length;
1270 out_sg->length = sym->auth.data.length;
1272 /* output sg entries */
1274 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1275 cpu_to_hw_sg(out_sg);
1278 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1279 sg->length = mbuf->data_len - sym->auth.data.offset;
1280 sg->offset = sym->auth.data.offset;
1282 /* Successive segs */
1287 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1288 sg->length = mbuf->data_len;
1291 sg->length -= ses->digest_length;
1293 if (is_encode(ses)) {
1295 /* set auth output */
1297 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1298 sg->length = ses->digest_length;
1306 in_sg->extension = 1;
1309 in_sg->length = ses->iv.length + sym->auth.data.length;
1311 in_sg->length = ses->iv.length + sym->auth.data.length
1312 + ses->digest_length;
1314 /* input sg entries */
1316 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1317 cpu_to_hw_sg(in_sg);
1320 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1321 sg->length = ses->iv.length;
1326 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1327 sg->length = mbuf->data_len - sym->auth.data.offset;
1328 sg->offset = sym->auth.data.offset;
1330 /* Successive segs */
1335 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1336 sg->length = mbuf->data_len;
1340 sg->length -= ses->digest_length;
1341 if (is_decode(ses)) {
1344 memcpy(ctx->digest, sym->auth.digest.data,
1345 ses->digest_length);
1346 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1347 sg->length = ses->digest_length;
1355 static inline struct dpaa_sec_job *
1356 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1358 struct rte_crypto_sym_op *sym = op->sym;
1359 struct dpaa_sec_job *cf;
1360 struct dpaa_sec_op_ctx *ctx;
1361 struct qm_sg_entry *sg;
1362 rte_iova_t src_start_addr, dst_start_addr;
1363 uint32_t length = 0;
1364 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1367 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1369 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1371 dst_start_addr = src_start_addr;
1373 ctx = dpaa_sec_alloc_ctx(ses);
1381 rte_prefetch0(cf->sg);
1383 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1384 if (is_encode(ses)) {
1385 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1386 sg->length = ses->iv.length;
1387 length += sg->length;
1391 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1392 sg->length = sym->auth.data.length;
1393 length += sg->length;
1397 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1398 sg->length = ses->iv.length;
1399 length += sg->length;
1404 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1405 sg->length = sym->auth.data.length;
1406 length += sg->length;
1409 memcpy(ctx->digest, sym->auth.digest.data,
1410 ses->digest_length);
1413 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1414 sg->length = ses->digest_length;
1415 length += sg->length;
1419 /* input compound frame */
1420 cf->sg[1].length = length;
1421 cf->sg[1].extension = 1;
1422 cf->sg[1].final = 1;
1423 cpu_to_hw_sg(&cf->sg[1]);
1427 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1428 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1429 sg->length = sym->cipher.data.length;
1430 length = sg->length;
1431 if (is_encode(ses)) {
1433 /* set auth output */
1435 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1436 sg->length = ses->digest_length;
1437 length += sg->length;
1442 /* output compound frame */
1443 cf->sg[0].length = length;
1444 cf->sg[0].extension = 1;
1445 cpu_to_hw_sg(&cf->sg[0]);
1450 static inline struct dpaa_sec_job *
1451 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1453 struct rte_crypto_sym_op *sym = op->sym;
1454 struct dpaa_sec_job *cf;
1455 struct dpaa_sec_op_ctx *ctx;
1456 struct qm_sg_entry *sg;
1457 phys_addr_t src_start_addr, dst_start_addr;
1459 ctx = dpaa_sec_alloc_ctx(ses);
1465 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1468 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1470 dst_start_addr = src_start_addr;
1474 qm_sg_entry_set64(sg, src_start_addr);
1475 sg->length = sym->m_src->pkt_len;
1479 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1482 qm_sg_entry_set64(sg, dst_start_addr);
1483 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
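/*
 * Protocol (IPsec) offload note: the whole packet (pkt_len) is handed
 * to CAAM, and the output S/G is sized to the full buffer beyond
 * data_off because encapsulation can grow the frame; the L4 ptype is
 * cleared since the payload type changes across encap/decap.
 */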
1490 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1493 /* Transmit frames to the given device and queue pair */
1495 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1496 uint16_t num_tx = 0;
1497 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1498 uint32_t frames_to_send;
1499 struct rte_crypto_op *op;
1500 struct dpaa_sec_job *cf;
1501 dpaa_sec_session *ses;
1502 uint32_t auth_only_len;
1503 struct qman_fq *inq[DPAA_SEC_BURST];
1506 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1507 DPAA_SEC_BURST : nb_ops;
1508 for (loop = 0; loop < frames_to_send; loop++) {
1510 switch (op->sess_type) {
1511 case RTE_CRYPTO_OP_WITH_SESSION:
1512 ses = (dpaa_sec_session *)
1513 get_sym_session_private_data(
1515 cryptodev_driver_id);
1517 case RTE_CRYPTO_OP_SECURITY_SESSION:
1518 ses = (dpaa_sec_session *)
1519 get_sec_session_private_data(
1520 op->sym->sec_session);
1524 "sessionless crypto op not supported");
1525 frames_to_send = loop;
1529 if (unlikely(!ses->qp)) {
1530 if (dpaa_sec_attach_sess_q(qp, ses)) {
1531 frames_to_send = loop;
1535 } else if (unlikely(ses->qp != qp)) {
1536 DPAA_SEC_DP_ERR("Old: ses->qp = %p"
1537 " New qp = %p\n", ses->qp, qp);
1538 frames_to_send = loop;
1543 auth_only_len = op->sym->auth.data.length -
1544 op->sym->cipher.data.length;
1545 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1546 if (is_proto_ipsec(ses)) {
1547 cf = build_proto(op, ses);
1548 } else if (is_auth_only(ses)) {
1549 cf = build_auth_only(op, ses);
1550 } else if (is_cipher_only(ses)) {
1551 cf = build_cipher_only(op, ses);
1552 } else if (is_aead(ses)) {
1553 cf = build_cipher_auth_gcm(op, ses);
1554 auth_only_len = ses->auth_only_len;
1555 } else if (is_auth_cipher(ses)) {
1556 cf = build_cipher_auth(op, ses);
1558 DPAA_SEC_DP_ERR("unsupported ops");
1559 frames_to_send = loop;
1564 if (is_auth_only(ses)) {
1565 cf = build_auth_only_sg(op, ses);
1566 } else if (is_cipher_only(ses)) {
1567 cf = build_cipher_only_sg(op, ses);
1568 } else if (is_aead(ses)) {
1569 cf = build_cipher_auth_gcm_sg(op, ses);
1570 auth_only_len = ses->auth_only_len;
1571 } else if (is_auth_cipher(ses)) {
1572 cf = build_cipher_auth_sg(op, ses);
1574 DPAA_SEC_DP_ERR("unsupported ops");
1575 frames_to_send = loop;
1580 if (unlikely(!cf)) {
1581 frames_to_send = loop;
1587 inq[loop] = ses->inq;
1588 fd->opaque_addr = 0;
1590 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1591 fd->_format1 = qm_fd_compound;
1592 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1593 /* auth_only_len is set to 0 in the descriptor and is
1594 * overwritten here via fd.cmd, which will update
1598 fd->cmd = 0x80000000 | auth_only_len;
1603 while (loop < frames_to_send) {
1604 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1605 frames_to_send - loop);
1607 nb_ops -= frames_to_send;
1608 num_tx += frames_to_send;
1611 dpaa_qp->tx_pkts += num_tx;
1612 dpaa_qp->tx_errs += nb_ops - num_tx;
1618 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1622 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1624 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1626 dpaa_qp->rx_pkts += num_rx;
1627 dpaa_qp->rx_errs += nb_ops - num_rx;
1629 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
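/*
 * Caller-side sketch (standard rte_cryptodev API, shown only to
 * illustrate how the burst hooks above are reached; dev_id and qp_id
 * are placeholders):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    &ops[done],
 *						    sent - done);
 */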
1634 /** Release queue pair */
1636 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1639 struct dpaa_sec_dev_private *internals;
1640 struct dpaa_sec_qp *qp = NULL;
1642 PMD_INIT_FUNC_TRACE();
1644 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1646 internals = dev->data->dev_private;
1647 if (qp_id >= internals->max_nb_queue_pairs) {
1648 DPAA_SEC_ERR("Max supported qpid %d",
1649 internals->max_nb_queue_pairs);
1653 qp = &internals->qps[qp_id];
1654 qp->internals = NULL;
1655 dev->data->queue_pairs[qp_id] = NULL;
1660 /** Setup a queue pair */
1662 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1663 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1664 __rte_unused int socket_id)
1666 struct dpaa_sec_dev_private *internals;
1667 struct dpaa_sec_qp *qp = NULL;
1669 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1671 internals = dev->data->dev_private;
1672 if (qp_id >= internals->max_nb_queue_pairs) {
1673 DPAA_SEC_ERR("Max supported qpid %d",
1674 internals->max_nb_queue_pairs);
1678 qp = &internals->qps[qp_id];
1679 qp->internals = internals;
1680 dev->data->queue_pairs[qp_id] = qp;
1685 /** Return the number of allocated queue pairs */
1687 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1689 PMD_INIT_FUNC_TRACE();
1691 return dev->data->nb_queue_pairs;
1694 /** Returns the size of session structure */
1696 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1698 PMD_INIT_FUNC_TRACE();
1700 return sizeof(dpaa_sec_session);
1704 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1705 struct rte_crypto_sym_xform *xform,
1706 dpaa_sec_session *session)
1708 session->cipher_alg = xform->cipher.algo;
1709 session->iv.length = xform->cipher.iv.length;
1710 session->iv.offset = xform->cipher.iv.offset;
1711 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1712 RTE_CACHE_LINE_SIZE);
1713 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1714 DPAA_SEC_ERR("No Memory for cipher key");
1717 session->cipher_key.length = xform->cipher.key.length;
1719 memcpy(session->cipher_key.data, xform->cipher.key.data,
1720 xform->cipher.key.length);
1721 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1728 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1729 struct rte_crypto_sym_xform *xform,
1730 dpaa_sec_session *session)
1732 session->auth_alg = xform->auth.algo;
1733 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1734 RTE_CACHE_LINE_SIZE);
1735 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1736 DPAA_SEC_ERR("No Memory for auth key");
1739 session->auth_key.length = xform->auth.key.length;
1740 session->digest_length = xform->auth.digest_length;
1742 memcpy(session->auth_key.data, xform->auth.key.data,
1743 xform->auth.key.length);
1744 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1751 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1752 struct rte_crypto_sym_xform *xform,
1753 dpaa_sec_session *session)
1755 session->aead_alg = xform->aead.algo;
1756 session->iv.length = xform->aead.iv.length;
1757 session->iv.offset = xform->aead.iv.offset;
1758 session->auth_only_len = xform->aead.aad_length;
1759 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1760 RTE_CACHE_LINE_SIZE);
1761 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1762 DPAA_SEC_ERR("No Memory for aead key");
1765 session->aead_key.length = xform->aead.key.length;
1766 session->digest_length = xform->aead.digest_length;
1768 memcpy(session->aead_key.data, xform->aead.key.data,
1769 xform->aead.key.length);
1770 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
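/*
 * Hedged example of the xform this helper consumes (standard DPDK
 * symmetric crypto API; key, IV_OFFSET and the lengths are illustrative
 * placeholders, not values mandated by this driver):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 16,
 *		},
 *	};
 */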
1776 static struct qman_fq *
1777 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1781 for (i = 0; i < qi->max_nb_sessions; i++) {
1782 if (qi->inq_attach[i] == 0) {
1783 qi->inq_attach[i] = 1;
1787 DPAA_SEC_WARN("All %x session RX queues in use", qi->max_nb_sessions);
1793 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1797 for (i = 0; i < qi->max_nb_sessions; i++) {
1798 if (&qi->inq[i] == fq) {
1799 qman_retire_fq(fq, NULL);
1801 qi->inq_attach[i] = 0;
1809 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1814 ret = dpaa_sec_prep_cdb(sess);
1816 DPAA_SEC_ERR("Unable to prepare sec cdb");
1819 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1820 ret = rte_dpaa_portal_init((void *)0);
1822 DPAA_SEC_ERR("Failure in affining portal");
1826 ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1827 qman_fq_fqid(&qp->outq));
1829 DPAA_SEC_ERR("Unable to init sec queue");
1835 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1836 struct rte_crypto_sym_xform *xform, void *sess)
1838 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1839 dpaa_sec_session *session = sess;
1841 PMD_INIT_FUNC_TRACE();
1843 if (unlikely(sess == NULL)) {
1844 DPAA_SEC_ERR("invalid session struct");
1847 memset(session, 0, sizeof(dpaa_sec_session));
1849 /* Default IV length = 0 */
1850 session->iv.length = 0;
1853 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1854 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1855 dpaa_sec_cipher_init(dev, xform, session);
1857 /* Authentication Only */
1858 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1859 xform->next == NULL) {
1860 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1861 dpaa_sec_auth_init(dev, xform, session);
1863 /* Cipher then Authenticate */
1864 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1865 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1866 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1867 dpaa_sec_cipher_init(dev, xform, session);
1868 dpaa_sec_auth_init(dev, xform->next, session);
1870 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1874 /* Authenticate then Cipher */
1875 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1876 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1877 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1878 dpaa_sec_auth_init(dev, xform, session);
1879 dpaa_sec_cipher_init(dev, xform->next, session);
1881 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1885 /* AEAD operation for AES-GCM kind of Algorithms */
1886 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1887 xform->next == NULL) {
1888 dpaa_sec_aead_init(dev, xform, session);
1891 DPAA_SEC_ERR("Invalid crypto type");
1894 session->ctx_pool = internals->ctx_pool;
1895 rte_spinlock_lock(&internals->lock);
1896 session->inq = dpaa_sec_attach_rxq(internals);
1897 rte_spinlock_unlock(&internals->lock);
1898 if (session->inq == NULL) {
1899 DPAA_SEC_ERR("unable to attach sec queue");
1906 rte_free(session->cipher_key.data);
1907 rte_free(session->auth_key.data);
1908 memset(session, 0, sizeof(dpaa_sec_session));
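/*
 * Summary of the xform chains accepted above: single CIPHER, single
 * AUTH, CIPHER->AUTH (encrypt direction only), AUTH->CIPHER (decrypt
 * direction only) and single AEAD (AES-GCM); anything else is rejected
 * with "Invalid crypto type".
 */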
1914 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
1915 struct rte_crypto_sym_xform *xform,
1916 struct rte_cryptodev_sym_session *sess,
1917 struct rte_mempool *mempool)
1919 void *sess_private_data;
1922 PMD_INIT_FUNC_TRACE();
1924 if (rte_mempool_get(mempool, &sess_private_data)) {
1925 DPAA_SEC_ERR("Couldn't get object from session mempool");
1929 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1931 DPAA_SEC_ERR("failed to configure session parameters");
1933 /* Return session to mempool */
1934 rte_mempool_put(mempool, sess_private_data);
1938 set_sym_session_private_data(sess, dev->driver_id,
1945 /** Clear the memory of session so it doesn't leave key material behind */
1947 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
1948 struct rte_cryptodev_sym_session *sess)
1950 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1951 uint8_t index = dev->driver_id;
1952 void *sess_priv = get_sym_session_private_data(sess, index);
1954 PMD_INIT_FUNC_TRACE();
1956 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1959 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1962 dpaa_sec_detach_rxq(qi, s->inq);
1963 rte_free(s->cipher_key.data);
1964 rte_free(s->auth_key.data);
1965 memset(s, 0, sizeof(dpaa_sec_session));
1966 set_sym_session_private_data(sess, index, NULL);
1967 rte_mempool_put(sess_mp, sess_priv);
1972 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1973 struct rte_security_session_conf *conf,
1976 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1977 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1978 struct rte_crypto_auth_xform *auth_xform = NULL;
1979 struct rte_crypto_cipher_xform *cipher_xform = NULL;
1980 dpaa_sec_session *session = (dpaa_sec_session *)sess;
1982 PMD_INIT_FUNC_TRACE();
1984 memset(session, 0, sizeof(dpaa_sec_session));
1985 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1986 cipher_xform = &conf->crypto_xform->cipher;
1987 if (conf->crypto_xform->next)
1988 auth_xform = &conf->crypto_xform->next->auth;
1990 auth_xform = &conf->crypto_xform->auth;
1991 if (conf->crypto_xform->next)
1992 cipher_xform = &conf->crypto_xform->next->cipher;
1994 session->proto_alg = conf->protocol;
1996 if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
1997 session->cipher_key.data = rte_zmalloc(NULL,
1998 cipher_xform->key.length,
1999 RTE_CACHE_LINE_SIZE);
2000 if (session->cipher_key.data == NULL &&
2001 cipher_xform->key.length > 0) {
2002 DPAA_SEC_ERR("No Memory for cipher key");
2005 memcpy(session->cipher_key.data, cipher_xform->key.data,
2006 cipher_xform->key.length);
2007 session->cipher_key.length = cipher_xform->key.length;
2009 switch (cipher_xform->algo) {
2010 case RTE_CRYPTO_CIPHER_AES_CBC:
2011 case RTE_CRYPTO_CIPHER_3DES_CBC:
2012 case RTE_CRYPTO_CIPHER_AES_CTR:
2015 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2016 cipher_xform->algo);
2019 session->cipher_alg = cipher_xform->algo;
2021 session->cipher_key.data = NULL;
2022 session->cipher_key.length = 0;
2023 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2026 if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2027 session->auth_key.data = rte_zmalloc(NULL,
2028 auth_xform->key.length,
2029 RTE_CACHE_LINE_SIZE);
2030 if (session->auth_key.data == NULL &&
2031 auth_xform->key.length > 0) {
2032 DPAA_SEC_ERR("No Memory for auth key");
2033 rte_free(session->cipher_key.data);
2036 memcpy(session->auth_key.data, auth_xform->key.data,
2037 auth_xform->key.length);
2038 session->auth_key.length = auth_xform->key.length;
2040 switch (auth_xform->algo) {
2041 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2042 case RTE_CRYPTO_AUTH_MD5_HMAC:
2043 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2044 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2045 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2046 case RTE_CRYPTO_AUTH_AES_CMAC:
2049 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2053 session->auth_alg = auth_xform->algo;
2055 session->auth_key.data = NULL;
2056 session->auth_key.length = 0;
2057 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2060 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2061 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2062 sizeof(session->ip4_hdr));
2063 session->ip4_hdr.ip_v = IPVERSION;
2064 session->ip4_hdr.ip_hl = 5;
2065 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2066 sizeof(session->ip4_hdr));
2067 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2068 session->ip4_hdr.ip_id = 0;
2069 session->ip4_hdr.ip_off = 0;
2070 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2071 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2072 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2074 session->ip4_hdr.ip_sum = 0;
2075 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2076 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2077 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2078 (void *)&session->ip4_hdr,
2081 session->encap_pdb.options =
2082 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2083 PDBOPTS_ESP_OIHI_PDB_INL |
2085 PDBHMO_ESP_ENCAP_DTTL |
2087 session->encap_pdb.spi = ipsec_xform->spi;
2088 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2090 session->dir = DIR_ENC;
2091 } else if (ipsec_xform->direction ==
2092 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2093 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2094 session->decap_pdb.options = sizeof(struct ip) << 16;
2095 session->dir = DIR_DEC;
2098 session->ctx_pool = internals->ctx_pool;
2099 rte_spinlock_lock(&internals->lock);
2100 session->inq = dpaa_sec_attach_rxq(internals);
2101 rte_spinlock_unlock(&internals->lock);
2102 if (session->inq == NULL) {
2103 DPAA_SEC_ERR("unable to attach sec queue");
2110 rte_free(session->auth_key.data);
2111 rte_free(session->cipher_key.data);
2112 memset(session, 0, sizeof(dpaa_sec_session));
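/*
 * Hedged sketch of an rte_security_session_conf this path consumes
 * (illustrative values; the SPI and the crypto xform chain are made-up
 * placeholders):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *		},
 *		.crypto_xform = &cipher_xform,	// cipher -> auth chain
 *	};
 */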
2117 dpaa_sec_security_session_create(void *dev,
2118 struct rte_security_session_conf *conf,
2119 struct rte_security_session *sess,
2120 struct rte_mempool *mempool)
2122 void *sess_private_data;
2123 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2126 if (rte_mempool_get(mempool, &sess_private_data)) {
2127 DPAA_SEC_ERR("Couldn't get object from session mempool");
2131 switch (conf->protocol) {
2132 case RTE_SECURITY_PROTOCOL_IPSEC:
2133 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2136 case RTE_SECURITY_PROTOCOL_MACSEC:
2142 DPAA_SEC_ERR("failed to configure session parameters");
2143 /* Return session to mempool */
2144 rte_mempool_put(mempool, sess_private_data);
2148 set_sec_session_private_data(sess, sess_private_data);
2153 /** Clear the memory of session so it doesn't leave key material behind */
2155 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2156 struct rte_security_session *sess)
2158 PMD_INIT_FUNC_TRACE();
2159 void *sess_priv = get_sec_session_private_data(sess);
2161 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2164 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2166 rte_free(s->cipher_key.data);
2167 rte_free(s->auth_key.data);
2168 memset(s, 0, sizeof(dpaa_sec_session));
2169 set_sec_session_private_data(sess, NULL);
2170 rte_mempool_put(sess_mp, sess_priv);
2177 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2178 struct rte_cryptodev_config *config __rte_unused)
2182 struct dpaa_sec_dev_private *internals;
2184 PMD_INIT_FUNC_TRACE();
2186 internals = dev->data->dev_private;
2187 sprintf(str, "ctx_pool_%d", dev->data->dev_id);
2188 if (!internals->ctx_pool) {
2189 internals->ctx_pool = rte_mempool_create((const char *)str,
2192 CTX_POOL_CACHE_SIZE, 0,
2193 NULL, NULL, NULL, NULL,
2195 if (!internals->ctx_pool) {
2196 DPAA_SEC_ERR("%s create failed", str);
2200 DPAA_SEC_INFO("mempool already created for dev_id: %d",
2207 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2209 PMD_INIT_FUNC_TRACE();
2214 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2216 PMD_INIT_FUNC_TRACE();
2220 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2222 struct dpaa_sec_dev_private *internals;
2224 PMD_INIT_FUNC_TRACE();
2229 internals = dev->data->dev_private;
2230 rte_mempool_free(internals->ctx_pool);
2231 internals->ctx_pool = NULL;
2237 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2238 struct rte_cryptodev_info *info)
2240 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2242 PMD_INIT_FUNC_TRACE();
2244 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2245 info->feature_flags = dev->feature_flags;
2246 info->capabilities = dpaa_sec_capabilities;
2247 info->sym.max_nb_sessions = internals->max_nb_sessions;
2248 info->driver_id = cryptodev_driver_id;
2252 static struct rte_cryptodev_ops crypto_ops = {
2253 .dev_configure = dpaa_sec_dev_configure,
2254 .dev_start = dpaa_sec_dev_start,
2255 .dev_stop = dpaa_sec_dev_stop,
2256 .dev_close = dpaa_sec_dev_close,
2257 .dev_infos_get = dpaa_sec_dev_infos_get,
2258 .queue_pair_setup = dpaa_sec_queue_pair_setup,
2259 .queue_pair_release = dpaa_sec_queue_pair_release,
2260 .queue_pair_count = dpaa_sec_queue_pair_count,
2261 .sym_session_get_size = dpaa_sec_sym_session_get_size,
2262 .sym_session_configure = dpaa_sec_sym_session_configure,
2263 .sym_session_clear = dpaa_sec_sym_session_clear
2266 static const struct rte_security_capability *
2267 dpaa_sec_capabilities_get(void *device __rte_unused)
2269 return dpaa_sec_security_cap;
2272 static const struct rte_security_ops dpaa_sec_security_ops = {
2273 .session_create = dpaa_sec_security_session_create,
2274 .session_update = NULL,
2275 .session_stats_get = NULL,
2276 .session_destroy = dpaa_sec_security_session_destroy,
2277 .set_pkt_metadata = NULL,
2278 .capabilities_get = dpaa_sec_capabilities_get
2282 dpaa_sec_uninit(struct rte_cryptodev *dev)
2284 struct dpaa_sec_dev_private *internals;
2289 internals = dev->data->dev_private;
2290 rte_free(dev->security_ctx);
2292 /* In case close has been called, internals->ctx_pool would be NULL */
2293 rte_mempool_free(internals->ctx_pool);
2294 rte_free(internals);
2296 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2297 dev->data->name, rte_socket_id());
2303 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2305 struct dpaa_sec_dev_private *internals;
2306 struct rte_security_ctx *security_instance;
2307 struct dpaa_sec_qp *qp;
2311 PMD_INIT_FUNC_TRACE();
2313 cryptodev->driver_id = cryptodev_driver_id;
2314 cryptodev->dev_ops = &crypto_ops;
2316 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2317 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2318 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2319 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2320 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2321 RTE_CRYPTODEV_FF_SECURITY |
2322 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2323 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2324 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2325 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2326 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2328 internals = cryptodev->data->dev_private;
2329 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2330 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2333 * For secondary processes, we don't initialise any further as primary
2334 * has already done this work. Only check we don't need a different
2337 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2338 DPAA_SEC_WARN("Device already initialized by primary process");
2342 /* Initialize security_ctx only for primary process*/
2343 security_instance = rte_malloc("rte_security_instances_ops",
2344 sizeof(struct rte_security_ctx), 0);
2345 if (security_instance == NULL)
2347 security_instance->device = (void *)cryptodev;
2348 security_instance->ops = &dpaa_sec_security_ops;
2349 security_instance->sess_cnt = 0;
2350 cryptodev->security_ctx = security_instance;
2352 rte_spinlock_init(&internals->lock);
2353 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2354 /* init qman fq for queue pair */
2355 qp = &internals->qps[i];
2356 ret = dpaa_sec_init_tx(&qp->outq);
2358 DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2363 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2364 QMAN_FQ_FLAG_TO_DCPORTAL;
2365 for (i = 0; i < internals->max_nb_sessions; i++) {
2366 /* create rx qman fq for sessions*/
2367 ret = qman_create_fq(0, flags, &internals->inq[i]);
2368 if (unlikely(ret != 0)) {
2369 DPAA_SEC_ERR("sec qman_create_fq failed");
2374 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2378 DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2380 dpaa_sec_uninit(cryptodev);
2385 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2386 struct rte_dpaa_device *dpaa_dev)
2388 struct rte_cryptodev *cryptodev;
2389 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2393 sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
2395 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2396 if (cryptodev == NULL)
2399 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2400 cryptodev->data->dev_private = rte_zmalloc_socket(
2401 "cryptodev private structure",
2402 sizeof(struct dpaa_sec_dev_private),
2403 RTE_CACHE_LINE_SIZE,
2406 if (cryptodev->data->dev_private == NULL)
2407 rte_panic("Cannot allocate memzone for private "
2411 dpaa_dev->crypto_dev = cryptodev;
2412 cryptodev->device = &dpaa_dev->device;
2414 /* init user callbacks */
2415 TAILQ_INIT(&(cryptodev->link_intr_cbs));
2417 /* if sec device version is not configured */
2418 if (!rta_get_sec_era()) {
2419 const struct device_node *caam_node;
2421 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2422 const uint32_t *prop = of_get_property(caam_node,
2427 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2433 /* Invoke PMD device initialization function */
2434 retval = dpaa_sec_dev_init(cryptodev);
2438 /* In case of error, cleanup is done */
2439 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2440 rte_free(cryptodev->data->dev_private);
2442 rte_cryptodev_pmd_release_device(cryptodev);
2448 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2450 struct rte_cryptodev *cryptodev;
2453 cryptodev = dpaa_dev->crypto_dev;
2454 if (cryptodev == NULL)
2457 ret = dpaa_sec_uninit(cryptodev);
2461 return rte_cryptodev_pmd_destroy(cryptodev);
2464 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2465 .drv_type = FSL_DPAA_CRYPTO,
2467 .name = "DPAA SEC PMD"
2469 .probe = cryptodev_dpaa_sec_probe,
2470 .remove = cryptodev_dpaa_sec_remove,
2473 static struct cryptodev_driver dpaa_sec_crypto_drv;
2475 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2476 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2477 cryptodev_driver_id);
2479 RTE_INIT(dpaa_sec_init_log)
2481 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2482 if (dpaa_logtype_sec >= 0)
2483 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);