1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
36 #include <rte_dpaa_bus.h>
38 #include <dpaa_sec_log.h>
40 enum rta_sec_era rta_sec_era;
42 static uint8_t cryptodev_driver_id;
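/*
 * Per-lcore scratch used by the DQRR callback: dequeued crypto ops are
 * staged in dpaa_sec_ops[] (up to DPAA_SEC_BURST) before being handed
 * back to the caller of the dequeue burst.
 */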
44 static __thread struct rte_crypto_op **dpaa_sec_ops;
45 static __thread int dpaa_sec_op_nb;
48 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
51 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
53 if (!ctx->fd_status) {
54 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
56 PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* Report the op status and then free the ctx memory. */
61 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
67 struct dpaa_sec_op_ctx *ctx;
70 retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
72 PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
* Clear SG memory. There are 16 SG entries of 16 bytes each.
* One call to dcbz_64() clears 64 bytes, so it is called 4 times
* to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
* each packet, and memset() is costlier than dcbz_64().
81 dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
82 dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
83 dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
84 dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
86 ctx->ctx_pool = ses->ctx_pool;
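/*
 * Cache the virt-to-IOVA delta of this mempool element so that later
 * translations of addresses inside the ctx (e.g. &cf->sg[2] or the
 * in-ctx digest) reduce to one subtraction in dpaa_mem_vtop_ctx()
 * instead of a memseg table walk.
 */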
87 ctx->vtop_offset = (size_t) ctx
88 - rte_mempool_virt2iova(ctx);
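/*
 * Generic slow-path translation: walk the EAL memseg table to map a
 * virtual address to its IOVA. Used where no per-ctx offset applies,
 * e.g. for session keys, IV buffers and the session CDB.
 */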
93 static inline rte_iova_t
94 dpaa_mem_vtop(void *vaddr)
96 const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
97 uint64_t vaddr_64, paddr;
100 vaddr_64 = (size_t)vaddr;
101 for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
102 if (vaddr_64 >= memseg[i].addr_64 &&
103 vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
104 paddr = memseg[i].iova +
105 (vaddr_64 - memseg[i].addr_64);
107 return (rte_iova_t)paddr;
/* virtual address conversion when mempool support is available for ctx */
114 static inline phys_addr_t
115 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
117 return (size_t)vaddr - ctx->vtop_offset;
121 dpaa_mem_ptov(rte_iova_t paddr)
123 const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
126 for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
127 if (paddr >= memseg[i].iova &&
128 paddr < memseg[i].iova + memseg[i].len)
129 return (void *)(size_t)(memseg[i].addr_64 +
130 (paddr - memseg[i].iova));
136 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
138 const struct qm_mr_entry *msg)
140 RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
141 fq->fqid, msg->ern.rc, msg->ern.seqnum);
/* Initialize the queue with the destination channel set to the CAAM
 * channel so that all packets in this queue are dispatched to CAAM.
 */
148 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
151 struct qm_mcc_initfq fq_opts;
155 /* Clear FQ options */
156 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
158 flags = QMAN_INITFQ_FLAG_SCHED;
159 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
160 QM_INITFQ_WE_CONTEXTB;
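/*
 * CONTEXT_A carries the IOVA of the session's shared descriptor (CDB);
 * CONTEXT_B carries the FQID on which SEC enqueues the results.
 */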
162 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
163 fq_opts.fqd.context_b = fqid_out;
164 fq_opts.fqd.dest.channel = qm_channel_caam;
165 fq_opts.fqd.dest.wq = 0;
167 fq_in->cb.ern = ern_sec_fq_handler;
169 PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
171 ret = qman_init_fq(fq_in, flags, &fq_opts);
172 if (unlikely(ret != 0))
173 PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
/* Frames are enqueued on in_fq; CAAM puts the crypto result on out_fq. */
179 static enum qman_cb_dqrr_result
180 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
181 struct qman_fq *fq __always_unused,
182 const struct qm_dqrr_entry *dqrr)
184 const struct qm_fd *fd;
185 struct dpaa_sec_job *job;
186 struct dpaa_sec_op_ctx *ctx;
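/* Stage at most DPAA_SEC_BURST ops per poll; defer the rest in the DQRR. */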
188 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
189 return qman_cb_dqrr_defer;
191 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
192 return qman_cb_dqrr_consume;
/* The SG table is embedded in an op ctx:
 * sg[0] is for output, sg[1] for input.
 */
199 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
201 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
202 ctx->fd_status = fd->status;
203 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
204 struct qm_sg_entry *sg_out;
207 sg_out = &job->sg[0];
208 hw_sg_to_cpu(sg_out);
209 len = sg_out->length;
210 ctx->op->sym->m_src->pkt_len = len;
211 ctx->op->sym->m_src->data_len = len;
213 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
214 dpaa_sec_op_ending(ctx);
216 return qman_cb_dqrr_consume;
219 /* caam result is put into this queue */
221 dpaa_sec_init_tx(struct qman_fq *fq)
224 struct qm_mcc_initfq opts;
227 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
228 QMAN_FQ_FLAG_DYNAMIC_FQID;
230 ret = qman_create_fq(0, flags, fq);
232 PMD_INIT_LOG(ERR, "qman_create_fq failed");
236 memset(&opts, 0, sizeof(opts));
237 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
238 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
240 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
242 fq->cb.dqrr = dqrr_out_fq_cb_rx;
243 fq->cb.ern = ern_sec_fq_handler;
245 ret = qman_init_fq(fq, 0, &opts);
247 PMD_INIT_LOG(ERR, "unable to init caam source fq!");
254 static inline int is_cipher_only(dpaa_sec_session *ses)
256 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
257 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
260 static inline int is_auth_only(dpaa_sec_session *ses)
262 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
263 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
266 static inline int is_aead(dpaa_sec_session *ses)
268 return ((ses->cipher_alg == 0) &&
269 (ses->auth_alg == 0) &&
270 (ses->aead_alg != 0));
273 static inline int is_auth_cipher(dpaa_sec_session *ses)
275 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
276 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
277 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
280 static inline int is_proto_ipsec(dpaa_sec_session *ses)
282 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
285 static inline int is_encode(dpaa_sec_session *ses)
287 return ses->dir == DIR_ENC;
290 static inline int is_decode(dpaa_sec_session *ses)
292 return ses->dir == DIR_DEC;
296 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
switch (ses->auth_alg) {
case RTE_CRYPTO_AUTH_NULL:
    ses->digest_length = 0;
    break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
    alginfo_a->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
    break;
default:
    PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
}
344 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_NULL:
    break;
case RTE_CRYPTO_CIPHER_AES_CBC:
    alginfo_c->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
    alginfo_c->algmode = OP_ALG_AAI_CBC;
    break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
    alginfo_c->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
    alginfo_c->algmode = OP_ALG_AAI_CBC;
    break;
case RTE_CRYPTO_CIPHER_AES_CTR:
    alginfo_c->algtype =
        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
        OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
    alginfo_c->algmode = OP_ALG_AAI_CTR;
    break;
default:
    PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
}
373 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
switch (ses->aead_alg) {
case RTE_CRYPTO_AEAD_AES_GCM:
    alginfo->algtype = OP_ALG_ALGSEL_AES;
    alginfo->algmode = OP_ALG_AAI_GCM;
    break;
default:
    PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
}
386 /* prepare command block of the session */
388 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
uint32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
int swap = false;
#else
int swap = true;
#endif
400 memset(cdb, 0, sizeof(struct sec_cdb));
402 if (is_cipher_only(ses)) {
403 caam_cipher_alg(ses, &alginfo_c);
404 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
405 PMD_TX_LOG(ERR, "not supported cipher alg\n");
409 alginfo_c.key = (size_t)ses->cipher_key.data;
410 alginfo_c.keylen = ses->cipher_key.length;
411 alginfo_c.key_enc_flags = 0;
412 alginfo_c.key_type = RTA_DATA_IMM;
414 shared_desc_len = cnstr_shdsc_blkcipher(
420 } else if (is_auth_only(ses)) {
421 caam_auth_alg(ses, &alginfo_a);
422 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
423 PMD_TX_LOG(ERR, "not supported auth alg\n");
427 alginfo_a.key = (size_t)ses->auth_key.data;
428 alginfo_a.keylen = ses->auth_key.length;
429 alginfo_a.key_enc_flags = 0;
430 alginfo_a.key_type = RTA_DATA_IMM;
432 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
436 } else if (is_aead(ses)) {
437 caam_aead_alg(ses, &alginfo);
438 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
439 PMD_TX_LOG(ERR, "not supported aead alg\n");
442 alginfo.key = (size_t)ses->aead_key.data;
443 alginfo.keylen = ses->aead_key.length;
444 alginfo.key_enc_flags = 0;
445 alginfo.key_type = RTA_DATA_IMM;
447 if (ses->dir == DIR_ENC)
448 shared_desc_len = cnstr_shdsc_gcm_encap(
449 cdb->sh_desc, true, swap,
454 shared_desc_len = cnstr_shdsc_gcm_decap(
455 cdb->sh_desc, true, swap,
460 caam_cipher_alg(ses, &alginfo_c);
461 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
462 PMD_TX_LOG(ERR, "not supported cipher alg\n");
466 alginfo_c.key = (size_t)ses->cipher_key.data;
467 alginfo_c.keylen = ses->cipher_key.length;
468 alginfo_c.key_enc_flags = 0;
469 alginfo_c.key_type = RTA_DATA_IMM;
471 caam_auth_alg(ses, &alginfo_a);
472 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
473 PMD_TX_LOG(ERR, "not supported auth alg\n");
477 alginfo_a.key = (size_t)ses->auth_key.data;
478 alginfo_a.keylen = ses->auth_key.length;
479 alginfo_a.key_enc_flags = 0;
480 alginfo_a.key_type = RTA_DATA_IMM;
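/*
 * Ask RTA whether the keys fit inline in the shared descriptor:
 * sh_desc[0]/[1] pass the key lengths in, and on return bit 0 of
 * sh_desc[2] covers the cipher key and bit 1 the auth key. A set bit
 * means the key can stay immediate (RTA_DATA_IMM); a clear bit means
 * it must be referenced by pointer (RTA_DATA_PTR), hence the
 * dpaa_mem_vtop() conversion below.
 */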
482 cdb->sh_desc[0] = alginfo_c.keylen;
483 cdb->sh_desc[1] = alginfo_a.keylen;
484 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
486 (unsigned int *)cdb->sh_desc,
487 &cdb->sh_desc[2], 2);
490 PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
493 if (cdb->sh_desc[2] & 1)
494 alginfo_c.key_type = RTA_DATA_IMM;
496 alginfo_c.key = (size_t)dpaa_mem_vtop(
497 (void *)(size_t)alginfo_c.key);
498 alginfo_c.key_type = RTA_DATA_PTR;
500 if (cdb->sh_desc[2] & (1<<1))
501 alginfo_a.key_type = RTA_DATA_IMM;
503 alginfo_a.key = (size_t)dpaa_mem_vtop(
504 (void *)(size_t)alginfo_a.key);
505 alginfo_a.key_type = RTA_DATA_PTR;
510 if (is_proto_ipsec(ses)) {
511 if (ses->dir == DIR_ENC) {
512 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
514 true, swap, &ses->encap_pdb,
515 (uint8_t *)&ses->ip4_hdr,
516 &alginfo_c, &alginfo_a);
517 } else if (ses->dir == DIR_DEC) {
518 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
520 true, swap, &ses->decap_pdb,
521 &alginfo_c, &alginfo_a);
/* auth_only_len is set as 0 here; it is overwritten
 * in the FD for each packet.
 */
527 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
528 true, swap, &alginfo_c, &alginfo_a,
530 ses->digest_length, ses->dir);
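/*
 * Finalize the CDB header: idlen is the shared-descriptor length in
 * words as returned by the cnstr_shdsc_* constructors, and the header
 * words are stored big-endian as SEC expects them.
 */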
533 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
534 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
535 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/* The qp is lockless; it must be accessed by only one thread. */
542 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
545 unsigned int pkts = 0;
547 struct qm_dqrr_entry *dq;
550 ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
551 DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
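/*
 * Issue a volatile dequeue command (VDQCR) for up to nb_ops frames and
 * drain the portal's DQRR ring until the command completes.
 */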
556 const struct qm_fd *fd;
557 struct dpaa_sec_job *job;
558 struct dpaa_sec_op_ctx *ctx;
559 struct rte_crypto_op *op;
561 dq = qman_dequeue(fq);
/* The SG table is embedded in an op ctx:
 * sg[0] is for output, sg[1] for input.
 */
570 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
572 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
573 ctx->fd_status = fd->status;
575 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
576 struct qm_sg_entry *sg_out;
579 sg_out = &job->sg[0];
580 hw_sg_to_cpu(sg_out);
581 len = sg_out->length;
582 op->sym->m_src->pkt_len = len;
583 op->sym->m_src->data_len = len;
585 if (!ctx->fd_status) {
586 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
589 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* Report the op status and then free the ctx memory. */
594 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
596 qman_dqrr_consume(fq, dq);
597 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
602 static inline struct dpaa_sec_job *
603 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
605 struct rte_crypto_sym_op *sym = op->sym;
606 struct rte_mbuf *mbuf = sym->m_src;
607 struct dpaa_sec_job *cf;
608 struct dpaa_sec_op_ctx *ctx;
609 struct qm_sg_entry *sg, *out_sg, *in_sg;
610 phys_addr_t start_addr;
611 uint8_t *old_digest, extra_segs;
618 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
619 PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
623 ctx = dpaa_sec_alloc_ctx(ses);
629 old_digest = ctx->digest;
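/*
 * For verification (decode), the expected digest is copied into the
 * ctx and appended to the input SG list so SEC checks it in-line; for
 * generation, the digest is written to the output entry instead.
 */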
633 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
634 out_sg->length = ses->digest_length;
635 cpu_to_hw_sg(out_sg);
639 /* need to extend the input to a compound frame */
640 in_sg->extension = 1;
642 in_sg->length = sym->auth.data.length;
643 qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
647 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
648 sg->length = mbuf->data_len - sym->auth.data.offset;
649 sg->offset = sym->auth.data.offset;
651 /* Successive segs */
656 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
657 sg->length = mbuf->data_len;
661 if (is_decode(ses)) {
662 /* Digest verification case */
665 rte_memcpy(old_digest, sym->auth.digest.data,
667 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
668 qm_sg_entry_set64(sg, start_addr);
669 sg->length = ses->digest_length;
670 in_sg->length += ses->digest_length;
672 /* Digest calculation case */
673 sg->length -= ses->digest_length;
684 * |<----data_len------->|
685 * |ip_header|ah_header|icv|payload|
690 static inline struct dpaa_sec_job *
691 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
693 struct rte_crypto_sym_op *sym = op->sym;
694 struct rte_mbuf *mbuf = sym->m_src;
695 struct dpaa_sec_job *cf;
696 struct dpaa_sec_op_ctx *ctx;
697 struct qm_sg_entry *sg;
698 rte_iova_t start_addr;
701 ctx = dpaa_sec_alloc_ctx(ses);
707 old_digest = ctx->digest;
709 start_addr = rte_pktmbuf_iova(mbuf);
712 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
713 sg->length = ses->digest_length;
718 if (is_decode(ses)) {
719 /* need to extend the input to a compound frame */
721 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
722 sg->length = sym->auth.data.length + ses->digest_length;
727 /* hash result or digest, save digest first */
728 rte_memcpy(old_digest, sym->auth.digest.data,
730 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
731 sg->length = sym->auth.data.length;
/* let the hardware verify the digest */
735 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
737 qm_sg_entry_set64(sg, start_addr);
738 sg->length = ses->digest_length;
742 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
743 sg->length = sym->auth.data.length;
751 static inline struct dpaa_sec_job *
752 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
754 struct rte_crypto_sym_op *sym = op->sym;
755 struct dpaa_sec_job *cf;
756 struct dpaa_sec_op_ctx *ctx;
757 struct qm_sg_entry *sg, *out_sg, *in_sg;
758 struct rte_mbuf *mbuf;
760 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
765 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
768 req_segs = mbuf->nb_segs * 2 + 3;
771 if (req_segs > MAX_SG_ENTRIES) {
772 PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
777 ctx = dpaa_sec_alloc_ctx(ses);
786 out_sg->extension = 1;
787 out_sg->length = sym->cipher.data.length;
788 qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
789 cpu_to_hw_sg(out_sg);
793 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
794 sg->length = mbuf->data_len - sym->cipher.data.offset;
795 sg->offset = sym->cipher.data.offset;
797 /* Successive segs */
802 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
803 sg->length = mbuf->data_len;
812 in_sg->extension = 1;
814 in_sg->length = sym->cipher.data.length + ses->iv.length;
817 qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
821 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
822 sg->length = ses->iv.length;
827 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
828 sg->length = mbuf->data_len - sym->cipher.data.offset;
829 sg->offset = sym->cipher.data.offset;
831 /* Successive segs */
836 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
837 sg->length = mbuf->data_len;
846 static inline struct dpaa_sec_job *
847 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
849 struct rte_crypto_sym_op *sym = op->sym;
850 struct dpaa_sec_job *cf;
851 struct dpaa_sec_op_ctx *ctx;
852 struct qm_sg_entry *sg;
853 rte_iova_t src_start_addr, dst_start_addr;
854 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
857 ctx = dpaa_sec_alloc_ctx(ses);
864 src_start_addr = rte_pktmbuf_iova(sym->m_src);
867 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
869 dst_start_addr = src_start_addr;
873 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
874 sg->length = sym->cipher.data.length + ses->iv.length;
880 /* need to extend the input to a compound frame */
883 sg->length = sym->cipher.data.length + ses->iv.length;
884 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
888 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
889 sg->length = ses->iv.length;
893 qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
894 sg->length = sym->cipher.data.length;
901 static inline struct dpaa_sec_job *
902 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
904 struct rte_crypto_sym_op *sym = op->sym;
905 struct dpaa_sec_job *cf;
906 struct dpaa_sec_op_ctx *ctx;
907 struct qm_sg_entry *sg, *out_sg, *in_sg;
908 struct rte_mbuf *mbuf;
910 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
915 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
918 req_segs = mbuf->nb_segs * 2 + 4;
921 if (ses->auth_only_len)
924 if (req_segs > MAX_SG_ENTRIES) {
925 PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
930 ctx = dpaa_sec_alloc_ctx(ses);
937 rte_prefetch0(cf->sg);
941 out_sg->extension = 1;
943 out_sg->length = sym->aead.data.length + ses->auth_only_len
944 + ses->digest_length;
946 out_sg->length = sym->aead.data.length + ses->auth_only_len;
948 /* output sg entries */
950 qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
951 cpu_to_hw_sg(out_sg);
954 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
955 sg->length = mbuf->data_len - sym->aead.data.offset +
957 sg->offset = sym->aead.data.offset - ses->auth_only_len;
959 /* Successive segs */
964 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
965 sg->length = mbuf->data_len;
968 sg->length -= ses->digest_length;
970 if (is_encode(ses)) {
972 /* set auth output */
974 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
975 sg->length = ses->digest_length;
983 in_sg->extension = 1;
986 in_sg->length = ses->iv.length + sym->aead.data.length
987 + ses->auth_only_len;
989 in_sg->length = ses->iv.length + sym->aead.data.length
990 + ses->auth_only_len + ses->digest_length;
992 /* input sg entries */
994 qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
998 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
999 sg->length = ses->iv.length;
1002 /* 2nd seg auth only */
1003 if (ses->auth_only_len) {
1005 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1006 sg->length = ses->auth_only_len;
1012 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1013 sg->length = mbuf->data_len - sym->aead.data.offset;
1014 sg->offset = sym->aead.data.offset;
1016 /* Successive segs */
1021 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1022 sg->length = mbuf->data_len;
1026 if (is_decode(ses)) {
1029 memcpy(ctx->digest, sym->aead.digest.data,
1030 ses->digest_length);
1031 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1032 sg->length = ses->digest_length;
1040 static inline struct dpaa_sec_job *
1041 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1043 struct rte_crypto_sym_op *sym = op->sym;
1044 struct dpaa_sec_job *cf;
1045 struct dpaa_sec_op_ctx *ctx;
1046 struct qm_sg_entry *sg;
1047 uint32_t length = 0;
1048 rte_iova_t src_start_addr, dst_start_addr;
1049 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1052 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1055 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1057 dst_start_addr = src_start_addr;
1059 ctx = dpaa_sec_alloc_ctx(ses);
1067 rte_prefetch0(cf->sg);
1069 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1070 if (is_encode(ses)) {
1071 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1072 sg->length = ses->iv.length;
1073 length += sg->length;
1077 if (ses->auth_only_len) {
1078 qm_sg_entry_set64(sg,
1079 dpaa_mem_vtop(sym->aead.aad.data));
1080 sg->length = ses->auth_only_len;
1081 length += sg->length;
1085 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1086 sg->length = sym->aead.data.length;
1087 length += sg->length;
1091 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1092 sg->length = ses->iv.length;
1093 length += sg->length;
1097 if (ses->auth_only_len) {
1098 qm_sg_entry_set64(sg,
1099 dpaa_mem_vtop(sym->aead.aad.data));
1100 sg->length = ses->auth_only_len;
1101 length += sg->length;
1105 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1106 sg->length = sym->aead.data.length;
1107 length += sg->length;
1110 memcpy(ctx->digest, sym->aead.digest.data,
1111 ses->digest_length);
1114 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1115 sg->length = ses->digest_length;
1116 length += sg->length;
1120 /* input compound frame */
1121 cf->sg[1].length = length;
1122 cf->sg[1].extension = 1;
1123 cf->sg[1].final = 1;
1124 cpu_to_hw_sg(&cf->sg[1]);
1128 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1129 qm_sg_entry_set64(sg,
1130 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1131 sg->length = sym->aead.data.length + ses->auth_only_len;
1132 length = sg->length;
1133 if (is_encode(ses)) {
1135 /* set auth output */
1137 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1138 sg->length = ses->digest_length;
1139 length += sg->length;
1144 /* output compound frame */
1145 cf->sg[0].length = length;
1146 cf->sg[0].extension = 1;
1147 cpu_to_hw_sg(&cf->sg[0]);
1152 static inline struct dpaa_sec_job *
1153 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1155 struct rte_crypto_sym_op *sym = op->sym;
1156 struct dpaa_sec_job *cf;
1157 struct dpaa_sec_op_ctx *ctx;
1158 struct qm_sg_entry *sg, *out_sg, *in_sg;
1159 struct rte_mbuf *mbuf;
1161 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1166 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1169 req_segs = mbuf->nb_segs * 2 + 4;
1172 if (req_segs > MAX_SG_ENTRIES) {
1173 PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
1178 ctx = dpaa_sec_alloc_ctx(ses);
1185 rte_prefetch0(cf->sg);
1188 out_sg = &cf->sg[0];
1189 out_sg->extension = 1;
1191 out_sg->length = sym->auth.data.length + ses->digest_length;
1193 out_sg->length = sym->auth.data.length;
1195 /* output sg entries */
1197 qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1198 cpu_to_hw_sg(out_sg);
1201 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1202 sg->length = mbuf->data_len - sym->auth.data.offset;
1203 sg->offset = sym->auth.data.offset;
1205 /* Successive segs */
1210 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1211 sg->length = mbuf->data_len;
1214 sg->length -= ses->digest_length;
1216 if (is_encode(ses)) {
1218 /* set auth output */
1220 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1221 sg->length = ses->digest_length;
1229 in_sg->extension = 1;
1232 in_sg->length = ses->iv.length + sym->auth.data.length;
1234 in_sg->length = ses->iv.length + sym->auth.data.length
1235 + ses->digest_length;
1237 /* input sg entries */
1239 qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1240 cpu_to_hw_sg(in_sg);
1243 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1244 sg->length = ses->iv.length;
1249 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1250 sg->length = mbuf->data_len - sym->auth.data.offset;
1251 sg->offset = sym->auth.data.offset;
1253 /* Successive segs */
1258 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1259 sg->length = mbuf->data_len;
1263 sg->length -= ses->digest_length;
1264 if (is_decode(ses)) {
1267 memcpy(ctx->digest, sym->auth.digest.data,
1268 ses->digest_length);
1269 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1270 sg->length = ses->digest_length;
1278 static inline struct dpaa_sec_job *
1279 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1281 struct rte_crypto_sym_op *sym = op->sym;
1282 struct dpaa_sec_job *cf;
1283 struct dpaa_sec_op_ctx *ctx;
1284 struct qm_sg_entry *sg;
1285 rte_iova_t src_start_addr, dst_start_addr;
1286 uint32_t length = 0;
1287 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1290 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1292 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1294 dst_start_addr = src_start_addr;
1296 ctx = dpaa_sec_alloc_ctx(ses);
1304 rte_prefetch0(cf->sg);
1306 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1307 if (is_encode(ses)) {
1308 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1309 sg->length = ses->iv.length;
1310 length += sg->length;
1314 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1315 sg->length = sym->auth.data.length;
1316 length += sg->length;
1320 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1321 sg->length = ses->iv.length;
1322 length += sg->length;
1327 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1328 sg->length = sym->auth.data.length;
1329 length += sg->length;
1332 memcpy(ctx->digest, sym->auth.digest.data,
1333 ses->digest_length);
1336 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1337 sg->length = ses->digest_length;
1338 length += sg->length;
1342 /* input compound frame */
1343 cf->sg[1].length = length;
1344 cf->sg[1].extension = 1;
1345 cf->sg[1].final = 1;
1346 cpu_to_hw_sg(&cf->sg[1]);
1350 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1351 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1352 sg->length = sym->cipher.data.length;
1353 length = sg->length;
1354 if (is_encode(ses)) {
1356 /* set auth output */
1358 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1359 sg->length = ses->digest_length;
1360 length += sg->length;
1365 /* output compound frame */
1366 cf->sg[0].length = length;
1367 cf->sg[0].extension = 1;
1368 cpu_to_hw_sg(&cf->sg[0]);
1373 static inline struct dpaa_sec_job *
1374 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1376 struct rte_crypto_sym_op *sym = op->sym;
1377 struct dpaa_sec_job *cf;
1378 struct dpaa_sec_op_ctx *ctx;
1379 struct qm_sg_entry *sg;
1380 phys_addr_t src_start_addr, dst_start_addr;
1382 ctx = dpaa_sec_alloc_ctx(ses);
1388 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1391 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1393 dst_start_addr = src_start_addr;
1397 qm_sg_entry_set64(sg, src_start_addr);
1398 sg->length = sym->m_src->pkt_len;
1402 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1405 qm_sg_entry_set64(sg, dst_start_addr);
1406 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1413 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1416 /* Function to transmit the frames to given device and queuepair */
1418 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1419 uint16_t num_tx = 0;
1420 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1421 uint32_t frames_to_send;
1422 struct rte_crypto_op *op;
1423 struct dpaa_sec_job *cf;
1424 dpaa_sec_session *ses;
1425 struct dpaa_sec_op_ctx *ctx;
1426 uint32_t auth_only_len;
1427 struct qman_fq *inq[DPAA_SEC_BURST];
1430 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1431 DPAA_SEC_BURST : nb_ops;
1432 for (loop = 0; loop < frames_to_send; loop++) {
1434 switch (op->sess_type) {
1435 case RTE_CRYPTO_OP_WITH_SESSION:
1436 ses = (dpaa_sec_session *)
1437 get_session_private_data(
1439 cryptodev_driver_id);
1441 case RTE_CRYPTO_OP_SECURITY_SESSION:
1442 ses = (dpaa_sec_session *)
1443 get_sec_session_private_data(
1444 op->sym->sec_session);
1448 "sessionless crypto op not supported");
1449 frames_to_send = loop;
1453 if (unlikely(!ses->qp || ses->qp != qp)) {
1454 PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1456 if (dpaa_sec_attach_sess_q(qp, ses)) {
1457 frames_to_send = loop;
1463 auth_only_len = op->sym->auth.data.length -
1464 op->sym->cipher.data.length;
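/*
 * Bytes that are authenticated but not ciphered; recomputed from
 * ses->auth_only_len for AEAD ops below, and passed to SEC through
 * fd->cmd.
 */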
1465 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1466 if (is_auth_only(ses)) {
1467 cf = build_auth_only(op, ses);
1468 } else if (is_cipher_only(ses)) {
1469 cf = build_cipher_only(op, ses);
1470 } else if (is_aead(ses)) {
1471 cf = build_cipher_auth_gcm(op, ses);
1472 auth_only_len = ses->auth_only_len;
1473 } else if (is_auth_cipher(ses)) {
1474 cf = build_cipher_auth(op, ses);
1475 } else if (is_proto_ipsec(ses)) {
1476 cf = build_proto(op, ses);
1478 PMD_TX_LOG(ERR, "not supported sec op");
1479 frames_to_send = loop;
1484 if (is_auth_only(ses)) {
1485 cf = build_auth_only_sg(op, ses);
1486 } else if (is_cipher_only(ses)) {
1487 cf = build_cipher_only_sg(op, ses);
1488 } else if (is_aead(ses)) {
1489 cf = build_cipher_auth_gcm_sg(op, ses);
1490 auth_only_len = ses->auth_only_len;
1491 } else if (is_auth_cipher(ses)) {
1492 cf = build_cipher_auth_sg(op, ses);
1494 PMD_TX_LOG(ERR, "not supported sec op");
1495 frames_to_send = loop;
1500 if (unlikely(!cf)) {
1501 frames_to_send = loop;
1507 inq[loop] = ses->inq;
1508 fd->opaque_addr = 0;
1510 ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1511 qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1512 fd->_format1 = qm_fd_compound;
1513 fd->length29 = 2 * sizeof(struct qm_sg_entry);
/* auth_only_len is set as 0 in the descriptor and is
 * overwritten here in fd->cmd, which updates the DPOVRD
 * register.
 */
if (auth_only_len)
    fd->cmd = 0x80000000 | auth_only_len;
1524 while (loop < frames_to_send) {
1525 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1526 frames_to_send - loop);
1528 nb_ops -= frames_to_send;
1529 num_tx += frames_to_send;
1532 dpaa_qp->tx_pkts += num_tx;
1533 dpaa_qp->tx_errs += nb_ops - num_tx;
1539 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1543 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1545 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1547 dpaa_qp->rx_pkts += num_rx;
1548 dpaa_qp->rx_errs += nb_ops - num_rx;
1550 PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
1555 /** Release queue pair */
1557 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1560 struct dpaa_sec_dev_private *internals;
1561 struct dpaa_sec_qp *qp = NULL;
1563 PMD_INIT_FUNC_TRACE();
1565 PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
1567 internals = dev->data->dev_private;
1568 if (qp_id >= internals->max_nb_queue_pairs) {
1569 PMD_INIT_LOG(ERR, "Max supported qpid %d",
1570 internals->max_nb_queue_pairs);
1574 qp = &internals->qps[qp_id];
1575 qp->internals = NULL;
1576 dev->data->queue_pairs[qp_id] = NULL;
1581 /** Setup a queue pair */
1583 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1584 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1585 __rte_unused int socket_id,
1586 __rte_unused struct rte_mempool *session_pool)
1588 struct dpaa_sec_dev_private *internals;
1589 struct dpaa_sec_qp *qp = NULL;
1591 PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
1592 dev, qp_id, qp_conf);
1594 internals = dev->data->dev_private;
1595 if (qp_id >= internals->max_nb_queue_pairs) {
1596 PMD_INIT_LOG(ERR, "Max supported qpid %d",
1597 internals->max_nb_queue_pairs);
1601 qp = &internals->qps[qp_id];
1602 qp->internals = internals;
1603 dev->data->queue_pairs[qp_id] = qp;
1608 /** Start queue pair */
1610 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1611 __rte_unused uint16_t queue_pair_id)
1613 PMD_INIT_FUNC_TRACE();
1618 /** Stop queue pair */
1620 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1621 __rte_unused uint16_t queue_pair_id)
1623 PMD_INIT_FUNC_TRACE();
1628 /** Return the number of allocated queue pairs */
1630 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1632 PMD_INIT_FUNC_TRACE();
1634 return dev->data->nb_queue_pairs;
1637 /** Returns the size of session structure */
1639 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1641 PMD_INIT_FUNC_TRACE();
1643 return sizeof(dpaa_sec_session);
1647 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1648 struct rte_crypto_sym_xform *xform,
1649 dpaa_sec_session *session)
1651 session->cipher_alg = xform->cipher.algo;
1652 session->iv.length = xform->cipher.iv.length;
1653 session->iv.offset = xform->cipher.iv.offset;
1654 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1655 RTE_CACHE_LINE_SIZE);
1656 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1657 PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
1660 session->cipher_key.length = xform->cipher.key.length;
1662 memcpy(session->cipher_key.data, xform->cipher.key.data,
1663 xform->cipher.key.length);
1664 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1671 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1672 struct rte_crypto_sym_xform *xform,
1673 dpaa_sec_session *session)
1675 session->auth_alg = xform->auth.algo;
1676 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1677 RTE_CACHE_LINE_SIZE);
1678 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1679 PMD_INIT_LOG(ERR, "No Memory for auth key\n");
1682 session->auth_key.length = xform->auth.key.length;
1683 session->digest_length = xform->auth.digest_length;
1685 memcpy(session->auth_key.data, xform->auth.key.data,
1686 xform->auth.key.length);
1687 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1694 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1695 struct rte_crypto_sym_xform *xform,
1696 dpaa_sec_session *session)
1698 session->aead_alg = xform->aead.algo;
1699 session->iv.length = xform->aead.iv.length;
1700 session->iv.offset = xform->aead.iv.offset;
1701 session->auth_only_len = xform->aead.aad_length;
1702 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1703 RTE_CACHE_LINE_SIZE);
1704 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1705 PMD_INIT_LOG(ERR, "No Memory for aead key\n");
1708 session->aead_key.length = xform->aead.key.length;
1709 session->digest_length = xform->aead.digest_length;
1711 memcpy(session->aead_key.data, xform->aead.key.data,
1712 xform->aead.key.length);
1713 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1719 static struct qman_fq *
1720 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1724 for (i = 0; i < qi->max_nb_sessions; i++) {
1725 if (qi->inq_attach[i] == 0) {
1726 qi->inq_attach[i] = 1;
1730 PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
1736 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1740 for (i = 0; i < qi->max_nb_sessions; i++) {
1741 if (&qi->inq[i] == fq) {
1742 qman_retire_fq(fq, NULL);
1744 qi->inq_attach[i] = 0;
1752 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1757 ret = dpaa_sec_prep_cdb(sess);
1759 PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1763 ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1764 qman_fq_fqid(&qp->outq));
1766 PMD_DRV_LOG(ERR, "Unable to init sec queue");
1772 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1773 uint16_t qp_id __rte_unused,
1774 void *ses __rte_unused)
1776 PMD_INIT_FUNC_TRACE();
1781 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1782 uint16_t qp_id __rte_unused,
1785 dpaa_sec_session *sess = ses;
1786 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1788 PMD_INIT_FUNC_TRACE();
1791 dpaa_sec_detach_rxq(qi, sess->inq);
1800 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1801 struct rte_crypto_sym_xform *xform, void *sess)
1803 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1804 dpaa_sec_session *session = sess;
1806 PMD_INIT_FUNC_TRACE();
1808 if (unlikely(sess == NULL)) {
1809 RTE_LOG(ERR, PMD, "invalid session struct\n");
1813 /* Default IV length = 0 */
1814 session->iv.length = 0;
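/*
 * Dispatch on the xform chain: a lone CIPHER or AUTH xform yields a
 * cipher-only/auth-only session, CIPHER+AUTH (in either order) a
 * chained session, and a lone AEAD xform an AEAD session.
 */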
1817 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1818 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1819 dpaa_sec_cipher_init(dev, xform, session);
1821 /* Authentication Only */
1822 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1823 xform->next == NULL) {
1824 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1825 dpaa_sec_auth_init(dev, xform, session);
1827 /* Cipher then Authenticate */
1828 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1829 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1830 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1831 dpaa_sec_cipher_init(dev, xform, session);
1832 dpaa_sec_auth_init(dev, xform->next, session);
1834 PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1838 /* Authenticate then Cipher */
1839 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1840 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1841 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1842 dpaa_sec_auth_init(dev, xform, session);
1843 dpaa_sec_cipher_init(dev, xform->next, session);
1845 PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1849 /* AEAD operation for AES-GCM kind of Algorithms */
1850 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1851 xform->next == NULL) {
1852 dpaa_sec_aead_init(dev, xform, session);
1855 PMD_DRV_LOG(ERR, "Invalid crypto type");
1858 session->ctx_pool = internals->ctx_pool;
1859 session->inq = dpaa_sec_attach_rxq(internals);
1860 if (session->inq == NULL) {
1861 PMD_DRV_LOG(ERR, "unable to attach sec queue");
1868 rte_free(session->cipher_key.data);
1869 rte_free(session->auth_key.data);
1870 memset(session, 0, sizeof(dpaa_sec_session));
1876 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1877 struct rte_crypto_sym_xform *xform,
1878 struct rte_cryptodev_sym_session *sess,
1879 struct rte_mempool *mempool)
1881 void *sess_private_data;
1884 PMD_INIT_FUNC_TRACE();
1886 if (rte_mempool_get(mempool, &sess_private_data)) {
1888 "Couldn't get object from session mempool");
1892 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1894 PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1895 "session parameters");
1897 /* Return session to mempool */
1898 rte_mempool_put(mempool, sess_private_data);
1902 set_session_private_data(sess, dev->driver_id,
1909 /** Clear the memory of session so it doesn't leave key material behind */
1911 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1912 struct rte_cryptodev_sym_session *sess)
1914 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1915 uint8_t index = dev->driver_id;
1916 void *sess_priv = get_session_private_data(sess, index);
1918 PMD_INIT_FUNC_TRACE();
1920 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1923 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1926 dpaa_sec_detach_rxq(qi, s->inq);
1927 rte_free(s->cipher_key.data);
1928 rte_free(s->auth_key.data);
1929 memset(s, 0, sizeof(dpaa_sec_session));
1930 set_session_private_data(sess, index, NULL);
1931 rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1937 struct rte_security_session_conf *conf,
1940 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1941 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1942 struct rte_crypto_auth_xform *auth_xform;
1943 struct rte_crypto_cipher_xform *cipher_xform;
1944 dpaa_sec_session *session = (dpaa_sec_session *)sess;
1946 PMD_INIT_FUNC_TRACE();
1948 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1949 cipher_xform = &conf->crypto_xform->cipher;
1950 auth_xform = &conf->crypto_xform->next->auth;
1952 auth_xform = &conf->crypto_xform->auth;
1953 cipher_xform = &conf->crypto_xform->next->cipher;
1955 session->proto_alg = conf->protocol;
1956 session->cipher_key.data = rte_zmalloc(NULL,
1957 cipher_xform->key.length,
1958 RTE_CACHE_LINE_SIZE);
1959 if (session->cipher_key.data == NULL &&
1960 cipher_xform->key.length > 0) {
1961 RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1965 session->cipher_key.length = cipher_xform->key.length;
1966 session->auth_key.data = rte_zmalloc(NULL,
1967 auth_xform->key.length,
1968 RTE_CACHE_LINE_SIZE);
1969 if (session->auth_key.data == NULL &&
1970 auth_xform->key.length > 0) {
1971 RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1972 rte_free(session->cipher_key.data);
1975 session->auth_key.length = auth_xform->key.length;
1976 memcpy(session->cipher_key.data, cipher_xform->key.data,
1977 cipher_xform->key.length);
1978 memcpy(session->auth_key.data, auth_xform->key.data,
1979 auth_xform->key.length);
1981 switch (auth_xform->algo) {
1982 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1983 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1985 case RTE_CRYPTO_AUTH_MD5_HMAC:
1986 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1988 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1989 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1991 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1992 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1994 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1995 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1997 case RTE_CRYPTO_AUTH_AES_CMAC:
1998 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2000 case RTE_CRYPTO_AUTH_NULL:
2001 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2003 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2004 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2005 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2006 case RTE_CRYPTO_AUTH_SHA1:
2007 case RTE_CRYPTO_AUTH_SHA256:
2008 case RTE_CRYPTO_AUTH_SHA512:
2009 case RTE_CRYPTO_AUTH_SHA224:
2010 case RTE_CRYPTO_AUTH_SHA384:
2011 case RTE_CRYPTO_AUTH_MD5:
2012 case RTE_CRYPTO_AUTH_AES_GMAC:
2013 case RTE_CRYPTO_AUTH_KASUMI_F9:
2014 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2015 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2016 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2020 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2025 switch (cipher_xform->algo) {
2026 case RTE_CRYPTO_CIPHER_AES_CBC:
2027 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2029 case RTE_CRYPTO_CIPHER_3DES_CBC:
2030 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2032 case RTE_CRYPTO_CIPHER_AES_CTR:
2033 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2035 case RTE_CRYPTO_CIPHER_NULL:
2036 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2037 case RTE_CRYPTO_CIPHER_3DES_ECB:
2038 case RTE_CRYPTO_CIPHER_AES_ECB:
2039 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2040 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2041 cipher_xform->algo);
2044 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2045 cipher_xform->algo);
2049 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2050 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2051 sizeof(session->ip4_hdr));
2052 session->ip4_hdr.ip_v = IPVERSION;
2053 session->ip4_hdr.ip_hl = 5;
2054 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2055 sizeof(session->ip4_hdr));
2056 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2057 session->ip4_hdr.ip_id = 0;
2058 session->ip4_hdr.ip_off = 0;
2059 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2060 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2061 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2063 session->ip4_hdr.ip_sum = 0;
2064 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2065 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2066 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2067 (void *)&session->ip4_hdr,
2070 session->encap_pdb.options =
2071 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2072 PDBOPTS_ESP_OIHI_PDB_INL |
2074 PDBHMO_ESP_ENCAP_DTTL;
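/*
 * Tunnel-mode encap: PDBOPTS_ESP_OIHI_PDB_INL inlines the prebuilt
 * outer IPv4 header into the PDB, and PDBHMO_ESP_ENCAP_DTTL selects
 * the TTL header-modification option.
 */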
2075 session->encap_pdb.spi = ipsec_xform->spi;
2076 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2078 session->dir = DIR_ENC;
2079 } else if (ipsec_xform->direction ==
2080 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2081 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2082 session->decap_pdb.options = sizeof(struct ip) << 16;
2083 session->dir = DIR_DEC;
2086 session->ctx_pool = internals->ctx_pool;
2087 session->inq = dpaa_sec_attach_rxq(internals);
2088 if (session->inq == NULL) {
2089 PMD_DRV_LOG(ERR, "unable to attach sec queue");
2096 rte_free(session->auth_key.data);
2097 rte_free(session->cipher_key.data);
2098 memset(session, 0, sizeof(dpaa_sec_session));
2103 dpaa_sec_security_session_create(void *dev,
2104 struct rte_security_session_conf *conf,
2105 struct rte_security_session *sess,
2106 struct rte_mempool *mempool)
2108 void *sess_private_data;
2109 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2112 if (rte_mempool_get(mempool, &sess_private_data)) {
2114 "Couldn't get object from session mempool");
2118 switch (conf->protocol) {
2119 case RTE_SECURITY_PROTOCOL_IPSEC:
2120 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2123 case RTE_SECURITY_PROTOCOL_MACSEC:
2130 "DPAA2 PMD: failed to configure session parameters");
2132 /* Return session to mempool */
2133 rte_mempool_put(mempool, sess_private_data);
2137 set_sec_session_private_data(sess, sess_private_data);
2142 /** Clear the memory of session so it doesn't leave key material behind */
2144 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2145 struct rte_security_session *sess)
2147 PMD_INIT_FUNC_TRACE();
2148 void *sess_priv = get_sec_session_private_data(sess);
2150 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2153 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2155 rte_free(s->cipher_key.data);
2156 rte_free(s->auth_key.data);
memset(s, 0, sizeof(dpaa_sec_session));
2158 set_sec_session_private_data(sess, NULL);
2159 rte_mempool_put(sess_mp, sess_priv);
2166 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2167 struct rte_cryptodev_config *config __rte_unused)
2169 PMD_INIT_FUNC_TRACE();
2175 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2177 PMD_INIT_FUNC_TRACE();
2182 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2184 PMD_INIT_FUNC_TRACE();
2188 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
2190 PMD_INIT_FUNC_TRACE();
2195 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2196 struct rte_cryptodev_info *info)
2198 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2200 PMD_INIT_FUNC_TRACE();
2202 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2203 info->feature_flags = dev->feature_flags;
2204 info->capabilities = dpaa_sec_capabilities;
2205 info->sym.max_nb_sessions = internals->max_nb_sessions;
2206 info->sym.max_nb_sessions_per_qp =
2207 RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2208 RTE_DPAA_MAX_NB_SEC_QPS;
2209 info->driver_id = cryptodev_driver_id;
2213 static struct rte_cryptodev_ops crypto_ops = {
2214 .dev_configure = dpaa_sec_dev_configure,
2215 .dev_start = dpaa_sec_dev_start,
2216 .dev_stop = dpaa_sec_dev_stop,
2217 .dev_close = dpaa_sec_dev_close,
2218 .dev_infos_get = dpaa_sec_dev_infos_get,
2219 .queue_pair_setup = dpaa_sec_queue_pair_setup,
2220 .queue_pair_release = dpaa_sec_queue_pair_release,
2221 .queue_pair_start = dpaa_sec_queue_pair_start,
2222 .queue_pair_stop = dpaa_sec_queue_pair_stop,
2223 .queue_pair_count = dpaa_sec_queue_pair_count,
2224 .session_get_size = dpaa_sec_session_get_size,
2225 .session_configure = dpaa_sec_session_configure,
2226 .session_clear = dpaa_sec_session_clear,
2227 .qp_attach_session = dpaa_sec_qp_attach_sess,
2228 .qp_detach_session = dpaa_sec_qp_detach_sess,
2231 static const struct rte_security_capability *
2232 dpaa_sec_capabilities_get(void *device __rte_unused)
2234 return dpaa_sec_security_cap;
2237 struct rte_security_ops dpaa_sec_security_ops = {
2238 .session_create = dpaa_sec_security_session_create,
2239 .session_update = NULL,
2240 .session_stats_get = NULL,
2241 .session_destroy = dpaa_sec_security_session_destroy,
2242 .set_pkt_metadata = NULL,
2243 .capabilities_get = dpaa_sec_capabilities_get
2247 dpaa_sec_uninit(struct rte_cryptodev *dev)
2249 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2254 rte_free(dev->security_ctx);
2256 rte_mempool_free(internals->ctx_pool);
2257 rte_free(internals);
2259 PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
2260 dev->data->name, rte_socket_id());
2266 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2268 struct dpaa_sec_dev_private *internals;
2269 struct rte_security_ctx *security_instance;
2270 struct dpaa_sec_qp *qp;
2275 PMD_INIT_FUNC_TRACE();
2277 cryptodev->driver_id = cryptodev_driver_id;
2278 cryptodev->dev_ops = &crypto_ops;
2280 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2281 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2282 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2283 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2284 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2285 RTE_CRYPTODEV_FF_SECURITY |
2286 RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2288 internals = cryptodev->data->dev_private;
2289 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2290 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2293 * For secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
* RX function.
2297 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2298 PMD_INIT_LOG(DEBUG, "Device already init by primary process");
2302 /* Initialize security_ctx only for primary process*/
2303 security_instance = rte_malloc("rte_security_instances_ops",
2304 sizeof(struct rte_security_ctx), 0);
2305 if (security_instance == NULL)
2307 security_instance->device = (void *)cryptodev;
2308 security_instance->ops = &dpaa_sec_security_ops;
2309 security_instance->sess_cnt = 0;
2310 cryptodev->security_ctx = security_instance;
2312 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2313 /* init qman fq for queue pair */
2314 qp = &internals->qps[i];
2315 ret = dpaa_sec_init_tx(&qp->outq);
2317 PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
2322 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2323 QMAN_FQ_FLAG_TO_DCPORTAL;
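/* Pre-create one RX FQ per potential session; they are attached on
 * demand by dpaa_sec_attach_rxq().
 */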
2324 for (i = 0; i < internals->max_nb_sessions; i++) {
2325 /* create rx qman fq for sessions*/
2326 ret = qman_create_fq(0, flags, &internals->inq[i]);
2327 if (unlikely(ret != 0)) {
2328 PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
2334 internals->ctx_pool = rte_mempool_create((const char *)str,
2337 CTX_POOL_CACHE_SIZE, 0,
2338 NULL, NULL, NULL, NULL,
2340 if (!internals->ctx_pool) {
2341 RTE_LOG(ERR, PMD, "%s create failed\n", str);
2345 PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
2349 PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
2351 dpaa_sec_uninit(cryptodev);
2356 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2357 struct rte_dpaa_device *dpaa_dev)
2359 struct rte_cryptodev *cryptodev;
2360 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
    dpaa_dev->id.dev_id);
2366 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2367 if (cryptodev == NULL)
2370 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2371 cryptodev->data->dev_private = rte_zmalloc_socket(
2372 "cryptodev private structure",
2373 sizeof(struct dpaa_sec_dev_private),
2374 RTE_CACHE_LINE_SIZE,
2377 if (cryptodev->data->dev_private == NULL)
2378 rte_panic("Cannot allocate memzone for private "
2382 dpaa_dev->crypto_dev = cryptodev;
2383 cryptodev->device = &dpaa_dev->device;
2384 cryptodev->device->driver = &dpaa_drv->driver;
2386 /* init user callbacks */
2387 TAILQ_INIT(&(cryptodev->link_intr_cbs));
2389 /* if sec device version is not configured */
2390 if (!rta_get_sec_era()) {
2391 const struct device_node *caam_node;
2393 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2394 const uint32_t *prop = of_get_property(caam_node,
2399 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2405 /* Invoke PMD device initialization function */
2406 retval = dpaa_sec_dev_init(cryptodev);
2410 /* In case of error, cleanup is done */
2411 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2412 rte_free(cryptodev->data->dev_private);
2414 rte_cryptodev_pmd_release_device(cryptodev);
2420 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2422 struct rte_cryptodev *cryptodev;
2425 cryptodev = dpaa_dev->crypto_dev;
2426 if (cryptodev == NULL)
2429 ret = dpaa_sec_uninit(cryptodev);
2433 return rte_cryptodev_pmd_destroy(cryptodev);
2436 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2437 .drv_type = FSL_DPAA_CRYPTO,
2439 .name = "DPAA SEC PMD"
2441 .probe = cryptodev_dpaa_sec_probe,
2442 .remove = cryptodev_dpaa_sec_remove,
2445 static struct cryptodev_driver dpaa_sec_crypto_drv;
2447 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2448 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
2449 cryptodev_driver_id);