/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
	struct dpaa_sec_op_ctx *ctx;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval)
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");

	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
	 * for each packet, memset would be costlier than dcbz_64().
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
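	/* For reference, a portable (but slower) equivalent of the four
	 * dcbz_64() calls above would be a single memset over all 16
	 * entries (4 * 64 = 256 bytes):
	 *
	 *	memset(ctx->job.sg, 0, 16 * sizeof(struct qm_sg_entry));
	 */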
	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t)ctx
			- rte_mempool_virt2iova(ctx);

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
	return (size_t)vaddr - ctx->vtop_offset;
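/* Example (a sketch): since the whole ctx object is IOVA-contiguous,
 * any address inside it can be translated with the cached offset alone,
 * avoiding the memseg lookup that dpaa_mem_vtop() performs:
 *
 *	rte_iova_t iova = dpaa_mem_vtop_ctx(ctx, &ctx->job.sg[2]);
 */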
dpaa_mem_ptov(rte_iova_t paddr)
	return rte_mem_iova2virt(paddr);

ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   const struct qm_mr_entry *msg)
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched to caam
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
	struct qm_mcc_initfq fq_opts;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
/* something is put into in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;

	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;

/* caam result is put into this queue */
dpaa_sec_init_tx(struct qman_fq *fq)
	struct qm_mcc_initfq opts;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret))
		PMD_INIT_LOG(ERR, "qman_create_fq failed");

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");

static inline int is_cipher_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));

static inline int is_auth_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));

static inline int is_aead(dpaa_sec_session *ses)
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));

static inline int is_auth_cipher(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));

static inline int is_proto_ipsec(dpaa_sec_session *ses)
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);

static inline int is_encode(dpaa_sec_session *ses)
	return ses->dir == DIR_ENC;

static inline int is_decode(dpaa_sec_session *ses)
	return ses->dir == DIR_DEC;
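/* A session thus falls into exactly one of five classes; the enqueue
 * path below dispatches on them in this order (sketch):
 *
 *	if (is_auth_only(ses))        cf = build_auth_only(op, ses);
 *	else if (is_cipher_only(ses)) cf = build_cipher_only(op, ses);
 *	else if (is_aead(ses))        cf = build_cipher_auth_gcm(op, ses);
 *	else if (is_auth_cipher(ses)) cf = build_cipher_auth(op, ses);
 *	else if (is_proto_ipsec(ses)) cf = build_proto(op, ses);
 */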
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);

caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);

caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);

/* prepare command block of the session */
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported aead alg\n");

		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");

		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
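		/* A note on the two checks above (a reading of the RTA usage
		 * here, not a documented contract): rta_inline_query() writes
		 * a bitmask into &cdb->sh_desc[2]; bit 0 set means the first
		 * (cipher) key still fits inline in the shared descriptor,
		 * bit 1 the second (auth) key. Keys that do not fit are
		 * passed by pointer (RTA_DATA_PTR) instead.
		 */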
	if (is_proto_ipsec(ses)) {
		if (ses->dir == DIR_ENC) {
			shared_desc_len = cnstr_shdsc_ipsec_new_encap(
					true, swap, &ses->encap_pdb,
					(uint8_t *)&ses->ip4_hdr,
					&alginfo_c, &alginfo_a);
		} else if (ses->dir == DIR_DEC) {
			shared_desc_len = cnstr_shdsc_ipsec_new_decap(
					true, swap, &ses->decap_pdb,
					&alginfo_c, &alginfo_a);
		/* auth_only_len is set to 0 here; it is overwritten
		 * in the FD for each packet.
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->digest_length, ses->dir);

	if (shared_desc_len < 0) {
		PMD_TX_LOG(ERR, "error in preparing command block\n");
		return shared_desc_len;

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/* qp is lockless; it must be accessed by only one thread */
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
	unsigned int pkts = 0;
	struct qm_dqrr_entry *dq;

	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			   DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);

		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);

		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;

		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
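/* Minimal sketch of the volatile-dequeue contract used above:
 *
 *	qman_set_vdq(fq, n);            // arm the FQ for at most n frames
 *	do {
 *		dq = qman_dequeue(fq);  // NULL when nothing is ready yet
 *		if (dq)
 *			qman_dqrr_consume(fq, dq);
 *	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
 */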
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",

	ctx = dpaa_sec_alloc_ctx(ses);

	old_digest = ctx->digest;

	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;

 * |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;

	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;

		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;

		/* let hw verify the digest */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
		req_segs = mbuf->nb_segs * 2 + 3;

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",

	ctx = dpaa_sec_alloc_ctx(ses);

	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	in_sg->extension = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	ctx = dpaa_sec_alloc_ctx(ses);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;

	/* need to extend the input to a compound frame */
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
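/* Compound frame produced above (sketch):
 *	sg[0] out: dst buffer at cipher.data.offset (length covers
 *	           cipher data plus IV room)
 *	sg[1] in (extension, final): points to sg[2..3]
 *	sg[2]: session IV
 *	sg[3]: source data at cipher.data.offset
 */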
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
				+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
		     ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
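/* Shape of the GCM compound frame built above (sketch):
 *	input  (sg[1], extension + final): IV, then AAD when
 *	       auth_only_len != 0, then the payload; on decode the
 *	       received digest is appended so the hardware can verify it.
 *	output (sg[0], extension): payload shifted back by auth_only_len,
 *	       with the computed digest appended on encode.
 */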
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
				+ ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;

dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
	/* Function to transmit the frames to the given device and queue pair */
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct dpaa_sec_op_ctx *ctx;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				 DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_session_private_data(
						cryptodev_driver_id);
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
					"sessionless crypto op not supported");
				frames_to_send = loop;

			if (unlikely(!ses->qp || ses->qp != qp)) {
				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;

			auth_only_len = op->sym->auth.data.length -
					op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					PMD_TX_LOG(ERR, "unsupported sec op");
					frames_to_send = loop;
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					PMD_TX_LOG(ERR, "unsupported sec op");
					frames_to_send = loop;

			if (unlikely(!cf)) {
				frames_to_send = loop;

			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd->cmd with the per-packet
			 * value.
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;
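			/* Example: for chained cipher/auth the value OR'd in
			 * is auth.data.length - cipher.data.length (computed
			 * above), i.e. the authenticated-but-not-encrypted
			 * bytes; for AEAD it is the session AAD length. The
			 * 0x80000000 bit appears to mark the override as
			 * valid (a reading of this code, not a documented
			 * constant here).
			 */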
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
						      frames_to_send - loop);

		nb_ops -= frames_to_send;
		num_tx += frames_to_send;

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

/** Release queue pair */
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

/** Setup a queue pair */
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

/** Start queue pair */
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
	PMD_INIT_FUNC_TRACE();

/** Stop queue pair */
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
	PMD_INIT_FUNC_TRACE();

/** Return the number of allocated queue pairs */
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;

/** Returns the size of session structure */
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);

dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");

	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?

dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");

	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?

dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");

	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
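/* Example of an xform this init consumes (a sketch; the key, IV offset
 * and lengths are illustrative values, not mandated by this driver):
 *
 *	struct rte_crypto_sym_xform xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */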
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;

	PMD_DRV_LOG(ERR, "All session queues in use %x", qi->max_nb_sessions);

dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
	ret = dpaa_sec_prep_cdb(sess);
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret)
			PMD_DRV_LOG(ERR, "Failure in affining portal");

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
	PMD_INIT_FUNC_TRACE();

dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

		dpaa_sec_detach_rxq(qi, sess->inq);

dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");

	/* AEAD operation for AES-GCM kind of algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");

	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");

	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
	void *sess_private_data;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
			"Couldn't get object from session mempool");

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_session_private_data(sess, dev->driver_id,

/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
	    cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
	    auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);

	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
		       sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
		   RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;

	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");

	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

	if (rte_mempool_get(mempool, &sess_private_data)) {
			"Couldn't get object from session mempool");

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
	case RTE_SECURITY_PROTOCOL_MACSEC:

			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sec_session_private_data(sess, sess_private_data);

/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	sprintf(str, "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
		if (!internals->ctx_pool) {
			RTE_LOG(ERR, PMD, "%s create failed\n", str);

		RTE_LOG(INFO, PMD, "mempool already created for dev_id: %d\n",

dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

dpaa_sec_dev_close(struct rte_cryptodev *dev)
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
	info->feature_flags = dev->feature_flags;
	info->capabilities = dpaa_sec_capabilities;
	info->sym.max_nb_sessions = internals->max_nb_sessions;
	info->sym.max_nb_sessions_per_qp =
		RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
		RTE_DPAA_MAX_NB_SEC_QPS;
	info->driver_id = cryptodev_driver_id;

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
	return dpaa_sec_security_cap;

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
dpaa_sec_uninit(struct rte_cryptodev *dev)
	struct dpaa_sec_dev_private *internals;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret)
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);

	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);

cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
				INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
	struct rte_cryptodev *cryptodev;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);

	return rte_cryptodev_pmd_destroy(cryptodev);

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
		.name = "DPAA SEC PMD"
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
			       cryptodev_driver_id);