/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
	struct dpaa_sec_op_ctx *ctx;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

dpaa_mem_ptov(rte_iova_t paddr)
	va = (void *)dpaax_iova_table_get_va(paddr);

	return rte_mem_iova2virt(paddr);
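/*
 * Illustrative note (a sketch, not part of the driver): for memory that
 * belongs to a registered memseg the two helpers above are meant to be
 * inverses of each other, e.g.:
 *
 *	void *va = rte_malloc(NULL, 64, 0);
 *	rte_iova_t pa = dpaa_mem_vtop(va);
 *	// assuming the dpaax IOVA table covers this memseg,
 *	// dpaa_mem_ptov(pa) == va, with rte_mem_iova2virt() as fallback
 */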
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   const struct qm_mr_entry *msg)
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
/* Initialize the queue with the CAAM channel as destination so that
 * all the packets in this queue are dispatched to CAAM.
 */
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
	struct qm_mcc_initfq fq_opts;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
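/*
 * Sketch of how the ingress side is wired (see dpaa_sec_attach_sess_q()
 * further below): CONTEXT_A carries the shared-descriptor address and
 * CONTEXT_B the FQID that CAAM enqueues results to, e.g.
 *
 *	dpaa_sec_init_rx(sess_inq, dpaa_mem_vtop(&sess->cdb),
 *			 qman_fq_fqid(&qp->outq));
 */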
/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;

		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;

		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
		mbuf->data_len = len;

	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
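/*
 * Layout assumed by the callback above (and by dpaa_sec_deq() below): the
 * frame descriptor carries a compound frame whose SG table is embedded in
 * a dpaa_sec_op_ctx, so the originating context is recovered as
 *
 *	struct dpaa_sec_job *job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
 *	struct dpaa_sec_op_ctx *ctx =
 *		container_of(job, struct dpaa_sec_op_ctx, job);
 */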
/* caam result is put into this queue */
dpaa_sec_init_tx(struct qman_fq *fq)
	struct qm_mcc_initfq opts;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
		DPAA_SEC_ERR("qman_create_fq failed");

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
		DPAA_SEC_ERR("unable to init caam source fq!");
static inline int is_cipher_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));

static inline int is_auth_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));

static inline int is_aead(dpaa_sec_session *ses)
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));

static inline int is_auth_cipher(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC) &&
		(ses->aead_alg == 0));

static inline int is_proto_ipsec(dpaa_sec_session *ses)
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);

static inline int is_proto_pdcp(dpaa_sec_session *ses)
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);

static inline int is_encode(dpaa_sec_session *ses)
	return ses->dir == DIR_ENC;

static inline int is_decode(dpaa_sec_session *ses)
	return ses->dir == DIR_DEC;
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
	case RTE_CRYPTO_CIPHER_AES_CBC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);

caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;

	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		authdata.algtype = PDCP_AUTH_TYPE_SNOW;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		authdata.algtype = PDCP_AUTH_TYPE_ZUC;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		authdata.algtype = PDCP_AUTH_TYPE_AES;
	case RTE_CRYPTO_AUTH_NULL:
		authdata.algtype = PDCP_AUTH_TYPE_NULL;
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	p_authdata = &authdata;

	cdb->sh_desc[1] = authdata.keylen;

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);

	return shared_desc_len;
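/*
 * Note on the rta_inline_query() pattern used above and below (a reading
 * aid, not new behaviour): sh_desc[0]/sh_desc[1] are seeded with the two
 * key lengths, and the query answers, via a bitmask in sh_desc[2], which
 * keys still fit immediately inside the shared descriptor (bit 0 for the
 * first key, bit 1 for the second). A key that does not fit is passed by
 * reference instead:
 *
 *	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen)
 *		cipherdata.key_type = RTA_DATA_PTR;	// pointer, not inline
 */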
/* prepare the IPsec protocol command block for the session */
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				true, swap, SHR_SERIAL,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				true, swap, SHR_SERIAL,
				&cipherdata, &authdata);
	return shared_desc_len;
/* prepare the command block for the session */
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
				swap, SHR_NEVER, &alginfo_c,
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->digest_length, ses->dir);

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/* The qp is lockless; it must be accessed by only one thread */
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	/*
	 * For small requests (up to four buffers) we ask for the exact
	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
	 * flag; without it the portal may provide up to two more buffers
	 * than requested, so we request two fewer in that case.
	 */
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
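	/*
	 * Worked example of the sizing above (assuming
	 * DPAA_MAX_DEQUEUE_NUM_FRAMES >= 32): nb_ops = 3 requests exactly
	 * 3 frames with QM_VDQCR_EXACT; nb_ops = 32 requests 30 without
	 * the flag, and the portal may deliver anywhere up to 32.
	 */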
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;

			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;

			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
			mbuf->data_len = len;

		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);

	old_digest = ctx->digest;

	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		rte_memcpy(old_digest, sym->auth.digest.data,
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
		/* Digest calculation case */
		sg->length -= ses->digest_length;
/*
 * |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 4);

	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;

	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;

		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;

		/* let the hardware verify the digest */
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
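/*
 * Sketch of the compound frames built by the two auth-only builders above:
 * sg[0] always points at the digest buffer for output, while the input is
 * extended for verification so the hardware can compare in place:
 *
 *	generate: in = [ data ]                 out = [ digest ]
 *	verify:   in = [ data | digest copy ]   out = [ digest ]
 */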
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
		req_segs = mbuf->nb_segs * 2 + 3;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	in_sg->extension = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	ctx = dpaa_sec_alloc_ctx(ses, 4);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
		dst_start_addr = src_start_addr;

	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;

	/* need to extend the input to a compound frame */
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;

	if (ses->auth_only_len)

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);

	rte_prefetch0(cf->sg);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->aead.digest.data,
				ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);

	rte_prefetch0(cf->sg);

	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->aead.digest.data,
				ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
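/*
 * Sketch of the compound frames built by the two GCM builders above (the
 * AAD entries only exist when ses->auth_only_len != 0):
 *
 *	encrypt: in  = [ IV | AAD | plaintext ]
 *	         out = [ AAD | ciphertext | tag ]
 *	decrypt: in  = [ IV | AAD | ciphertext | tag copy ]
 *	         out = [ AAD | plaintext ]
 */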
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);

	rte_prefetch0(cf->sg);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
		out_sg->length = sym->auth.data.length + ses->digest_length;
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
		in_sg->length = ses->iv.length + sym->auth.data.length;
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->auth.digest.data,
				ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);

	rte_prefetch0(cf->sg);

	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->auth.digest.data,
				ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
		dst_start_addr = src_start_addr;

	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));

	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	in_sg->extension = 1;
	in_len = mbuf->data_len;

	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		in_len += sg->length;

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
	/* Function to transmit the frames to the given device and queue pair */
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len, index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							cryptodev_driver_id);
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
					"sessionless crypto op not supported");
				frames_to_send = loop;

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
				if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
					cf = build_proto_sg(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;

			if (unlikely(!cf)) {
				frames_to_send = loop;

			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in the descriptor and is
			 * overwritten here in fd.cmd for each packet.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;
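			/* Worked example of the override word above:
			 * auth_only_len = 16 yields fd->cmd = 0x80000010,
			 * i.e. bit 31 set plus the auth-only length in the
			 * low bits.
			 */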
			/* In case of PDCP, the per-packet HFN is stored in
			 * the mbuf priv area after sym_op.
			 */
			if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					is_proto_pdcp(ses));

		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);

		nb_ops -= frames_to_send;
		num_tx += frames_to_send;

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
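/*
 * Minimal usage sketch for the burst handlers above, from an application's
 * point of view (hypothetical ids, error handling omitted):
 *
 *	struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *	uint16_t n;
 *	// ... build symmetric ops attached to a configured session ...
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_prepared);
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, DPAA_SEC_BURST);
 */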
/** Release queue pair */
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;
/** Setup a queue pair */
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
				CTX_POOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			      dev->data->dev_id, qp_id);

	dev->data->queue_pairs[qp_id] = qp;

/** Return the number of allocated queue pairs */
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;

/** Return the size of the session structure */
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?

dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?

dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
	DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);

dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
		DPAA_SEC_ERR("Unable to prepare sec cdb");
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
			DPAA_SEC_ERR("Failure in affining portal");
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
		DPAA_SEC_ERR("Unable to init sec queue");
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
			DPAA_SEC_ERR("Not supported: Auth then Cipher");

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
			DPAA_SEC_ERR("Not supported: Auth then Cipher");

	/* AEAD operation for AES-GCM-style algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);
		DPAA_SEC_ERR("Invalid crypto type");

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
	rte_spinlock_unlock(&internals->lock);

	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *mempool)
	void *sess_private_data;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sym_session_private_data(sess, dev->driver_id,

free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		dpaa_sec_detach_rxq(qi, s->inq[i]);

	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
	session->proto_alg = conf->protocol;

	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_key.length = cipher_xform->key.length;

		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
				cipher_xform->algo);
		session->cipher_alg = cipher_xform->algo;
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;

	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
		session->auth_alg = auth_xform->algo;
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
			       &ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
			       &ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBHMO_ESP_ENCAP_DTTL |
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
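		/* Worked example for the decap options above: an IPv4 outer
		 * header (sizeof(struct ip) == 20 on typical builds) gives
		 * options = 20 << 16 = 0x00140000, optionally OR'ed with
		 * PDBOPTS_ESP_ESN.
		 */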
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
	rte_spinlock_unlock(&internals->lock);

	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
2479 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2480 struct rte_security_session_conf *conf,
2483 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2484 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2485 struct rte_crypto_auth_xform *auth_xform = NULL;
2486 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2487 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2488 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2491 PMD_INIT_FUNC_TRACE();
2493 memset(session, 0, sizeof(dpaa_sec_session));
2495 /* find xfrm types */
2496 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2497 cipher_xform = &xform->cipher;
2498 if (xform->next != NULL)
2499 auth_xform = &xform->next->auth;
2500 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2501 auth_xform = &xform->auth;
2502 if (xform->next != NULL)
2503 cipher_xform = &xform->next->cipher;
2505 DPAA_SEC_ERR("Invalid crypto type");
2509 session->proto_alg = conf->protocol;
2511 session->cipher_key.data = rte_zmalloc(NULL,
2512 cipher_xform->key.length,
2513 RTE_CACHE_LINE_SIZE);
2514 if (session->cipher_key.data == NULL &&
2515 cipher_xform->key.length > 0) {
2516 DPAA_SEC_ERR("No Memory for cipher key");
2519 session->cipher_key.length = cipher_xform->key.length;
2520 memcpy(session->cipher_key.data, cipher_xform->key.data,
2521 cipher_xform->key.length);
2522 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2524 session->cipher_alg = cipher_xform->algo;
2526 session->cipher_key.data = NULL;
2527 session->cipher_key.length = 0;
2528 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2529 session->dir = DIR_ENC;

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be NULL for an auth-only session */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
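
/*
 * rte_security .session_create callback: take a private-session object
 * from the mempool and configure it for IPsec or PDCP offload.
 */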
static int
dpaa_sec_security_session_create(void *dev,
			struct rte_security_session_conf *conf,
			struct rte_security_session *sess,
			struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
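
/*
 * DQRR callback used when a SEC out-FQ is bound to the DPAA event device
 * with parallel scheduling: recover the crypto op from the frame
 * descriptor and fill the rte_event from the queue's event template.
 */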
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
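
/*
 * Atomic-scheduling variant of the callback above: the DQRR entry is
 * additionally held active and its index recorded per lcore, so the
 * entry is consumed only once the application releases the event.
 */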
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
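
/* Bind a queue pair's out-FQ to an event device channel so that crypto
 * completions are delivered as events instead of being polled.
 */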
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
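
/* Detach a queue pair's out-FQ from the event device and restore the
 * default poll-mode callbacks.
 */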
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
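
/*
 * One-time device initialization: register the ops table and burst
 * functions, advertise feature flags and, in the primary process only,
 * create the per-queue-pair TX FQs and the pool of RX FQs used by
 * sessions.
 */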
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
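
/* DPAA bus probe: allocate the cryptodev, resolve the SEC era from the
 * device tree if it is not already configured, then run the PMD init.
 */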
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
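
/* DPAA bus remove: undo dpaa_sec_dev_init() and release the cryptodev. */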
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}