/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
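
/*
 * Per-thread scratch used by the ordered-dequeue callback
 * (dqrr_out_fq_cb_rx) to collect up to DPAA_SEC_BURST completed ops;
 * __thread keeps this lock-free since a queue pair is polled by a single
 * lcore.
 */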
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
	 * for each packet, memset() would be costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);
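	/*
	 * The offset just computed is the virtual-to-IOVA delta of this ctx,
	 * so hot-path code can translate addresses inside it with a plain
	 * subtraction instead of a table lookup.
	 */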

	return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
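
/*
 * Translation strategy: dpaa_mem_ptov() tries the fast dpaax IOVA table
 * first and only falls back to the slower EAL-wide rte_mem_iova2virt()
 * walk on a miss; dpaa_mem_vtop() refreshes that table for every memseg it
 * resolves, which keeps later ptov() calls on the fast path.
 */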
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the CAAM chan so that
 * all packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Frames are enqueued on the in_fq and CAAM puts the crypto result on the
 * out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
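		/*
		 * The walk above is needed because protocol offload changes
		 * the frame length: sg_out->length is the total output
		 * length reported by SEC, and whatever remains after the
		 * intermediate segments belongs to the last one.
		 */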
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC) &&
		(ses->aead_alg == 0));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     ses->cipher_alg);
		return -1;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
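	/*
	 * rta_inline_query() decides, per key, whether it can still be
	 * inlined into the shared descriptor; bit 0 of sh_desc[2] answers
	 * for the cipher key and bit 1 for the auth key. Keys that do not
	 * fit were converted above to physical-address references
	 * (RTA_DATA_PTR).
	 */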
	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_NULL:
			alginfo_c.algtype = 0;
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_AES_CBC:
			alginfo_c.algtype = OP_ALG_ALGSEL_AES;
			alginfo_c.algmode = OP_ALG_AAI_CBC;
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			alginfo_c.algtype = OP_ALG_ALGSEL_3DES;
			alginfo_c.algmode = OP_ALG_AAI_CBC;
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			alginfo_c.algtype = OP_ALG_ALGSEL_AES;
			alginfo_c.algmode = OP_ALG_AAI_CTR;
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			alginfo_c.algtype = OP_ALG_ALGSEL_3DES;
			alginfo_c.algmode = OP_ALG_AAI_CTR;
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			alginfo_c.algtype = OP_ALG_ALGSEL_SNOW_F8;
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->iv.length, ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			alginfo_c.algtype = OP_ALG_ALGSEL_ZUCE;
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->iv.length, ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
	} else if (is_auth_only(ses)) {
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_NULL:
			alginfo_a.algtype = 0;
			ses->digest_length = 0;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_MD5;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_SHA1;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_SHA224;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_SHA256;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_SHA384;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			alginfo_a.algtype = OP_ALG_ALGSEL_SHA512;
			alginfo_a.algmode = OP_ALG_AAI_HMAC;
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			alginfo_a.algtype = OP_ALG_ALGSEL_SNOW_F9;
			alginfo_a.algmode = OP_ALG_AAI_F9;
			ses->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			alginfo_a.algtype = OP_ALG_ALGSEL_ZUCA;
			alginfo_a.algmode = OP_ALG_AAI_F9;
			ses->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* auth_only_len is set to 0 here and is overwritten in the
		 * fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
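	/*
	 * The header words are stored byte-swapped because SEC parses the
	 * CDB directly from memory in big-endian form; idlen records the
	 * shared-descriptor length the hardware fetches. Doing the swap
	 * once at session setup keeps it off the per-packet path.
	 */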

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static inline unsigned int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;
	struct qman_fq *fq = &qp->outq;

	/*
	 * For requests of up to four buffers, we provide the exact number
	 * of buffers. Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Without QM_VDQCR_EXACT, QMan can provide up to two more buffers
	 * than requested, so we request two less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
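	/*
	 * Example: nb_ops = 32 requests 30 frames without QM_VDQCR_EXACT,
	 * so QMan returns at most 30 + 2 = 32; nb_ops = 3 requests
	 * exactly 3.
	 */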
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
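		/*
		 * The digest copied above travels at the end of the input
		 * frame, so SEC itself compares the computed ICV against it
		 * and reports a mismatch through the FD status.
		 */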
	}

	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/*
 * packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}

	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
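	/*
	 * The output entry above deliberately spans the whole remaining
	 * buffer (buf_len - data_off) rather than pkt_len: protocol offload
	 * grows the frame on encap (headers, ICV, padding), and SEC reports
	 * the final length back through the output SG entry.
	 */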
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
					cf = build_proto_sg(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
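			/*
			 * The FD is a compound frame: it points at the
			 * two-entry SG table built above, sg[0] describing
			 * the output and sg[1] the input of the job.
			 */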
			/* auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd.cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}
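			/*
			 * DPOVRD layout as encoded above: bit 31 enables the
			 * override, bits 16-30 carry the auth-only trailer
			 * length and bits 0-15 the auth-only header length.
			 * For example, auth_hdr_len = 8 with auth_tail_len
			 * = 0 yields fd->cmd = 0x80000008.
			 */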
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd,
					is_proto_pdcp(ses));
			}
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;

	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_key.length = cipher_xform->key.length;

		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
				     cipher_xform->algo);
			goto out;
		}
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			goto out;
		}
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
	} else {
		goto out;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
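/*
 * Illustrative configuration (sketch, not part of this driver): an egress
 * ESP tunnel session reaching the hook above could be described roughly as
 * follows, with crypto_xform pointing at a cipher xform chained to an auth
 * xform (egress ordering):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */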
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op ==
				RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
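/*
 * Illustrative configuration (sketch, not part of this driver): a PDCP
 * control-plane session reaching the hook above could look roughly like
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0x70C0A,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */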
static int
dpaa_sec_security_session_create(void *dev,
		struct rte_security_session_conf *conf,
		struct rte_security_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);
	return 0;
}
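/*
 * Usage sketch (illustrative, not part of this driver): applications create
 * such sessions through the rte_security API on the device's security
 * context, e.g.
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp);
 *
 * which lands in dpaa_sec_security_session_create() via
 * dpaa_sec_security_ops.
 */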
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENOMEM;
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
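/*
 * Returning qman_cb_dqrr_consume above lets QMan retire the DQRR entry
 * immediately: parallel-scheduled events carry no ordering state, so the
 * completed crypto op is handed to the eventdev and the ring slot is freed
 * right away.
 */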
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	uint8_t index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
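/*
 * Atomic-scheduled events return qman_cb_dqrr_defer instead: the DQRR slot
 * stays held so QMan keeps the flow pinned to this portal. The per-lcore
 * bookkeeping above records which slot backs the event (via impl_opaque and
 * the mbuf seqn) so the entry can be consumed once the application releases
 * the event.
 */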
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
	return 0;
}
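/*
 * Usage sketch (illustrative, not part of this driver): the eventdev crypto
 * adapter is the normal caller of the attach/detach pair, e.g.
 *
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						qp_id, &ev);
 *
 * which ends up reprogramming the queue pair's outq FQ so completions are
 * delivered to the given event channel.
 */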
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());
	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};
static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}