/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
    if (!ctx->fd_status) {
        ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
    } else {
        DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
        ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
    }

    /* report op status to sym->op and then free the ctx memory */
    rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
    struct dpaa_sec_op_ctx *ctx;
    int retval;

    retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
    if (!ctx || retval) {
        DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
        return NULL;
    }
    /*
     * Clear SG memory. There are 16 SG entries of 16 bytes each.
     * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
     * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
     * each packet, and memset is costlier than dcbz_64().
     */
    dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
    dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
    dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
    dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

    ctx->ctx_pool = ses->ctx_pool;
    ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

    return ctx;
}
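
/*
 * Note on the context layout (a summary of the code above, not new
 * behaviour): each dpaa_sec_op_ctx carries the 16-entry SG table used to
 * build the compound frame plus a small scratch digest area, so a single
 * mempool get per packet provides all per-op state. The cached vtop_offset
 * is the constant virtual-to-IOVA delta of this mempool element;
 * subtracting it from a virtual address inside the ctx yields its bus
 * address without a table lookup.
 */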

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
    const struct rte_memseg *ms;

    ms = rte_mem_virt2memseg(vaddr, NULL);
    if (ms) {
        dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
        return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
    }
    return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
    void *va;

    va = (void *)dpaax_iova_table_get_va(paddr);
    if (likely(va))
        return va;

    return rte_mem_iova2virt(paddr);
}
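
/*
 * The two helpers above are intentionally asymmetric: dpaa_mem_vtop() also
 * feeds the dpaax IOVA table with the memseg it resolved, so the
 * physical-to-virtual direction used on the hot dequeue path is normally a
 * fast table hit, with rte_mem_iova2virt() kept only as a slow fallback.
 */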

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
           struct qman_fq *fq,
           const struct qm_mr_entry *msg)
{
    DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
            fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as its destination so that
 * all the packets on this queue can be dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
         uint32_t fqid_out)
{
    struct qm_mcc_initfq fq_opts;
    unsigned int flags;
    int ret = -1;

    /* Clear FQ options */
    memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

    flags = QMAN_INITFQ_FLAG_SCHED;
    fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
              QM_INITFQ_WE_CONTEXTB;

    qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
    fq_opts.fqd.context_b = fqid_out;
    fq_opts.fqd.dest.channel = qm_channel_caam;
    fq_opts.fqd.dest.wq = 0;

    fq_in->cb.ern = ern_sec_fq_handler;

    DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

    ret = qman_init_fq(fq_in, flags, &fq_opts);
    if (unlikely(ret != 0))
        DPAA_SEC_ERR("qman_init_fq failed %d", ret);

    return ret;
}
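
/*
 * In other words, an RX (to-CAAM) FQ is fully described by two context
 * fields: CONTEXT_A holds the IOVA of the session's shared descriptor (the
 * CDB prepared in dpaa_sec_prep_cdb()) and CONTEXT_B holds the FQID of the
 * out queue on which CAAM must enqueue the processed frame.
 */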

/* Frames are enqueued on the in_fq and CAAM puts the crypto result on the out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
          struct qman_fq *fq __always_unused,
          const struct qm_dqrr_entry *dqrr)
{
    const struct qm_fd *fd;
    struct dpaa_sec_job *job;
    struct dpaa_sec_op_ctx *ctx;

    if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
        return qman_cb_dqrr_defer;

    if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
        return qman_cb_dqrr_consume;

    fd = &dqrr->fd;
    /* sg is embedded in an op ctx,
     * sg[0] is for output
     * sg[1] for input
     */
    job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

    ctx = container_of(job, struct dpaa_sec_op_ctx, job);
    ctx->fd_status = fd->status;
    if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
        struct qm_sg_entry *sg_out;
        uint32_t len;

        sg_out = &job->sg[0];
        hw_sg_to_cpu(sg_out);
        len = sg_out->length;
        ctx->op->sym->m_src->pkt_len = len;
        ctx->op->sym->m_src->data_len = len;
    }
    dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
    dpaa_sec_op_ending(ctx);

    return qman_cb_dqrr_consume;
}
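
/*
 * Returning qman_cb_dqrr_defer when DPAA_SEC_BURST completions are already
 * collected leaves the remaining entries on the DQRR ring, so nothing is
 * lost when the caller's burst fills up. For security sessions the mbuf
 * lengths are refreshed from sg[0] because protocol offload (IPsec/PDCP
 * encap or decap) changes the frame size in hardware.
 */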

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
    int ret;
    struct qm_mcc_initfq opts;
    uint32_t flags;

    flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
        QMAN_FQ_FLAG_DYNAMIC_FQID;

    ret = qman_create_fq(0, flags, fq);
    if (unlikely(ret)) {
        DPAA_SEC_ERR("qman_create_fq failed");
        return ret;
    }

    memset(&opts, 0, sizeof(opts));
    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
               QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

    /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

    fq->cb.dqrr = dqrr_out_fq_cb_rx;
    fq->cb.ern = ern_sec_fq_handler;

    ret = qman_init_fq(fq, 0, &opts);
    if (unlikely(ret)) {
        DPAA_SEC_ERR("unable to init caam source fq!");
        return ret;
    }

    return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
    return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
    return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
    return ((ses->cipher_alg == 0) &&
        (ses->auth_alg == 0) &&
        (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
    return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
        (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
    return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
    return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
    return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
    return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
    switch (ses->auth_alg) {
    case RTE_CRYPTO_AUTH_NULL:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_NULL : 0;
        ses->digest_length = 0;
        break;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        break;
    default:
        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
    }
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
    switch (ses->cipher_alg) {
    case RTE_CRYPTO_CIPHER_NULL:
        alginfo_c->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_NULL : 0;
        break;
    case RTE_CRYPTO_CIPHER_AES_CBC:
        alginfo_c->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
        alginfo_c->algmode = OP_ALG_AAI_CBC;
        break;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        alginfo_c->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
        alginfo_c->algmode = OP_ALG_AAI_CBC;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        alginfo_c->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
        alginfo_c->algmode = OP_ALG_AAI_CTR;
        break;
    default:
        DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
    }
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
    switch (ses->aead_alg) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        alginfo->algtype = OP_ALG_ALGSEL_AES;
        alginfo->algmode = OP_ALG_AAI_GCM;
        break;
    default:
        DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
    }
}

static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
    struct alginfo authdata = {0}, cipherdata = {0};
    struct sec_cdb *cdb = &ses->cdb;
    struct alginfo *p_authdata = NULL;
    int32_t shared_desc_len = 0;
    int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
    int swap = false;
#else
    int swap = true;
#endif

    switch (ses->cipher_alg) {
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
        cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
        break;
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
        break;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
        break;
    case RTE_CRYPTO_CIPHER_NULL:
        cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
        break;
    default:
        DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
                 ses->cipher_alg);
        return -1;
    }

    cipherdata.key = (size_t)ses->cipher_key.data;
    cipherdata.keylen = ses->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    cdb->sh_desc[0] = cipherdata.keylen;
    cdb->sh_desc[1] = 0;

    if (ses->auth_alg) {
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
            authdata.algtype = PDCP_AUTH_TYPE_SNOW;
            break;
        case RTE_CRYPTO_AUTH_ZUC_EIA3:
            authdata.algtype = PDCP_AUTH_TYPE_ZUC;
            break;
        case RTE_CRYPTO_AUTH_AES_CMAC:
            authdata.algtype = PDCP_AUTH_TYPE_AES;
            break;
        case RTE_CRYPTO_AUTH_NULL:
            authdata.algtype = PDCP_AUTH_TYPE_NULL;
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                     ses->auth_alg);
            return -1;
        }

        authdata.key = (size_t)ses->auth_key.data;
        authdata.keylen = ses->auth_key.length;
        authdata.key_enc_flags = 0;
        authdata.key_type = RTA_DATA_IMM;

        p_authdata = &authdata;

        cdb->sh_desc[1] = authdata.keylen;
    }

    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                   MIN_JOB_DESC_SIZE,
                   (unsigned int *)cdb->sh_desc,
                   &cdb->sh_desc[2], 2);
    if (err < 0) {
        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
        return err;
    }

    if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
        cipherdata.key =
            (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
        cipherdata.key_type = RTA_DATA_PTR;
    }
    if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
        authdata.key =
            (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
        authdata.key_type = RTA_DATA_PTR;
    }

    cdb->sh_desc[0] = 0;
    cdb->sh_desc[1] = 0;
    cdb->sh_desc[2] = 0;

    if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn,
                    ses->pdcp.sn_size,
                    ses->pdcp.bearer,
                    ses->pdcp.pkt_dir,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, &authdata,
                    0);
        else if (ses->dir == DIR_DEC)
            shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn,
                    ses->pdcp.sn_size,
                    ses->pdcp.bearer,
                    ses->pdcp.pkt_dir,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, &authdata,
                    0);
    } else {
        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.sn_size,
                    ses->pdcp.hfn,
                    ses->pdcp.bearer,
                    ses->pdcp.pkt_dir,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, p_authdata, 0);
        else if (ses->dir == DIR_DEC)
            shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.sn_size,
                    ses->pdcp.hfn,
                    ses->pdcp.bearer,
                    ses->pdcp.pkt_dir,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, p_authdata, 0);
    }

    return shared_desc_len;
}
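
/*
 * rta_inline_query() decides, per key, whether the descriptor still fits
 * with the key inlined: with the two key lengths staged in sh_desc[0] and
 * sh_desc[1], bit 0 of the mask returned in sh_desc[2] covers the first
 * (cipher) key and bit 1 the second (auth) key. A set bit keeps the key
 * immediate (RTA_DATA_IMM); a clear bit means only the key's IOVA is
 * embedded (RTA_DATA_PTR). The IPsec and generic CDB builders below apply
 * the same query.
 */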

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
    struct alginfo cipherdata = {0}, authdata = {0};
    struct sec_cdb *cdb = &ses->cdb;
    int32_t shared_desc_len = 0;
    int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
    int swap = false;
#else
    int swap = true;
#endif

    caam_cipher_alg(ses, &cipherdata);
    if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
        DPAA_SEC_ERR("not supported cipher alg");
        return -ENOTSUP;
    }

    cipherdata.key = (size_t)ses->cipher_key.data;
    cipherdata.keylen = ses->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    caam_auth_alg(ses, &authdata);
    if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
        DPAA_SEC_ERR("not supported auth alg");
        return -ENOTSUP;
    }

    authdata.key = (size_t)ses->auth_key.data;
    authdata.keylen = ses->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    cdb->sh_desc[0] = cipherdata.keylen;
    cdb->sh_desc[1] = authdata.keylen;
    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                   MIN_JOB_DESC_SIZE,
                   (unsigned int *)cdb->sh_desc,
                   &cdb->sh_desc[2], 2);

    if (err < 0) {
        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
        return err;
    }
    if (cdb->sh_desc[2] & 1)
        cipherdata.key_type = RTA_DATA_IMM;
    else {
        cipherdata.key = (size_t)dpaa_mem_vtop(
                    (void *)(size_t)cipherdata.key);
        cipherdata.key_type = RTA_DATA_PTR;
    }
    if (cdb->sh_desc[2] & (1 << 1))
        authdata.key_type = RTA_DATA_IMM;
    else {
        authdata.key = (size_t)dpaa_mem_vtop(
                    (void *)(size_t)authdata.key);
        authdata.key_type = RTA_DATA_PTR;
    }

    cdb->sh_desc[0] = 0;
    cdb->sh_desc[1] = 0;
    cdb->sh_desc[2] = 0;
    if (ses->dir == DIR_ENC) {
        shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                cdb->sh_desc,
                true, swap, SHR_SERIAL,
                &ses->encap_pdb,
                (uint8_t *)&ses->ip4_hdr,
                &cipherdata, &authdata);
    } else if (ses->dir == DIR_DEC) {
        shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                cdb->sh_desc,
                true, swap, SHR_SERIAL,
                &ses->decap_pdb,
                &cipherdata, &authdata);
    }
    return shared_desc_len;
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
    struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
    int32_t shared_desc_len = 0;
    struct sec_cdb *cdb = &ses->cdb;
    int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
    int swap = false;
#else
    int swap = true;
#endif

    memset(cdb, 0, sizeof(struct sec_cdb));

    if (is_proto_ipsec(ses)) {
        shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
    } else if (is_proto_pdcp(ses)) {
        shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
    } else if (is_cipher_only(ses)) {
        caam_cipher_alg(ses, &alginfo_c);
        if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported cipher alg");
            return -ENOTSUP;
        }

        alginfo_c.key = (size_t)ses->cipher_key.data;
        alginfo_c.keylen = ses->cipher_key.length;
        alginfo_c.key_enc_flags = 0;
        alginfo_c.key_type = RTA_DATA_IMM;

        shared_desc_len = cnstr_shdsc_blkcipher(
                cdb->sh_desc, true,
                swap, SHR_NEVER, &alginfo_c,
                NULL,
                ses->iv.length,
                ses->dir);
    } else if (is_auth_only(ses)) {
        caam_auth_alg(ses, &alginfo_a);
        if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported auth alg");
            return -ENOTSUP;
        }

        alginfo_a.key = (size_t)ses->auth_key.data;
        alginfo_a.keylen = ses->auth_key.length;
        alginfo_a.key_enc_flags = 0;
        alginfo_a.key_type = RTA_DATA_IMM;

        shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                       swap, SHR_NEVER, &alginfo_a,
                       !ses->dir,
                       ses->digest_length);
    } else if (is_aead(ses)) {
        caam_aead_alg(ses, &alginfo);
        if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported aead alg");
            return -ENOTSUP;
        }
        alginfo.key = (size_t)ses->aead_key.data;
        alginfo.keylen = ses->aead_key.length;
        alginfo.key_enc_flags = 0;
        alginfo.key_type = RTA_DATA_IMM;

        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_gcm_encap(
                    cdb->sh_desc, true, swap, SHR_NEVER,
                    &alginfo,
                    ses->iv.length,
                    ses->digest_length);
        else
            shared_desc_len = cnstr_shdsc_gcm_decap(
                    cdb->sh_desc, true, swap, SHR_NEVER,
                    &alginfo,
                    ses->iv.length,
                    ses->digest_length);
    } else {
        caam_cipher_alg(ses, &alginfo_c);
        if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported cipher alg");
            return -ENOTSUP;
        }

        alginfo_c.key = (size_t)ses->cipher_key.data;
        alginfo_c.keylen = ses->cipher_key.length;
        alginfo_c.key_enc_flags = 0;
        alginfo_c.key_type = RTA_DATA_IMM;

        caam_auth_alg(ses, &alginfo_a);
        if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported auth alg");
            return -ENOTSUP;
        }

        alginfo_a.key = (size_t)ses->auth_key.data;
        alginfo_a.keylen = ses->auth_key.length;
        alginfo_a.key_enc_flags = 0;
        alginfo_a.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = alginfo_c.keylen;
        cdb->sh_desc[1] = alginfo_a.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                       MIN_JOB_DESC_SIZE,
                       (unsigned int *)cdb->sh_desc,
                       &cdb->sh_desc[2], 2);

        if (err < 0) {
            DPAA_SEC_ERR("Crypto: Incorrect key lengths");
            return err;
        }
        if (cdb->sh_desc[2] & 1)
            alginfo_c.key_type = RTA_DATA_IMM;
        else {
            alginfo_c.key = (size_t)dpaa_mem_vtop(
                        (void *)(size_t)alginfo_c.key);
            alginfo_c.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1 << 1))
            alginfo_a.key_type = RTA_DATA_IMM;
        else {
            alginfo_a.key = (size_t)dpaa_mem_vtop(
                        (void *)(size_t)alginfo_a.key);
            alginfo_a.key_type = RTA_DATA_PTR;
        }
        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        /* Auth_only_len is set as 0 here and it will be
         * overwritten in fd for each packet.
         */
        shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                ses->iv.length, 0,
                ses->digest_length, ses->dir);
    }

    if (shared_desc_len < 0) {
        DPAA_SEC_ERR("error in preparing command block");
        return shared_desc_len;
    }

    cdb->sh_hdr.hi.field.idlen = shared_desc_len;
    cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
    cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

    return 0;
}
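
/*
 * Whichever branch built the descriptor, the CDB is finalized the same
 * way: the shared-descriptor length returned by the RTA constructor is
 * written into the header's IDLEN field and both header words are
 * converted to big endian, since CAAM parses the command block big-endian
 * regardless of host byte order (the same reason the constructors take
 * the swap flag).
 */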

/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
    struct qman_fq *fq;
    unsigned int pkts = 0;
    int num_rx_bufs, ret;
    struct qm_dqrr_entry *dq;
    uint32_t vdqcr_flags = 0;

    fq = &qp->outq;
    /*
     * For requests of fewer than four buffers, we provide the exact number
     * of buffers. Otherwise we do not set the QM_VDQCR_EXACT flag.
     * Not setting the QM_VDQCR_EXACT flag can provide two more buffers
     * than requested, so we request two less in that case.
     */
    if (nb_ops < 4) {
        vdqcr_flags = QM_VDQCR_EXACT;
        num_rx_bufs = nb_ops;
    } else {
        num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
            (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
    }
    ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
    if (ret)
        return 0;

    do {
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;
        struct rte_crypto_op *op;

        dq = qman_dequeue(fq);
        if (!dq)
            continue;

        fd = &dq->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output
         * sg[1] for input
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        op = ctx->op;
        if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
            struct qm_sg_entry *sg_out;
            uint32_t len;

            sg_out = &job->sg[0];
            hw_sg_to_cpu(sg_out);
            len = sg_out->length;
            op->sym->m_src->pkt_len = len;
            op->sym->m_src->data_len = len;
        }
        if (!ctx->fd_status) {
            op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
            DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
            op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
        ops[pkts++] = op;

        /* report op status to sym->op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
    } while (fq->flags & QMAN_FQ_STATE_VDQCR);

    return pkts;
}
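
/*
 * Example of the sizing rule above: a request for 2 ops uses
 * QM_VDQCR_EXACT and asks for exactly 2 frames, while a request for 32
 * asks for 30 without the flag; an inexact VDQCR command may return up to
 * two extra frames, so both cases stay within the caller's ops[] array.
 */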

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct rte_mbuf *mbuf = sym->m_src;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    phys_addr_t start_addr;
    uint8_t *old_digest, extra_segs;

    if (is_decode(ses))
        extra_segs = 3;
    else
        extra_segs = 2;

    if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                MAX_SG_ENTRIES);
        return NULL;
    }
    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;
    old_digest = ctx->digest;

    /* output */
    out_sg = &cf->sg[0];
    qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
    out_sg->length = ses->digest_length;
    cpu_to_hw_sg(out_sg);

    /* input */
    in_sg = &cf->sg[1];
    /* need to extend the input to a compound frame */
    in_sg->extension = 1;
    in_sg->final = 1;
    in_sg->length = sym->auth.data.length;
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

    /* 1st seg */
    sg = in_sg + 1;
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }

    if (is_decode(ses)) {
        /* Digest verification case */
        cpu_to_hw_sg(sg);
        sg++;
        rte_memcpy(old_digest, sym->auth.digest.data,
               ses->digest_length);
        start_addr = dpaa_mem_vtop(old_digest);
        qm_sg_entry_set64(sg, start_addr);
        sg->length = ses->digest_length;
        in_sg->length += ses->digest_length;
    } else {
        /* Digest calculation case */
        sg->length -= ses->digest_length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);
    cpu_to_hw_sg(in_sg);

    return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 *		^
 *		|
 *		mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct rte_mbuf *mbuf = sym->m_src;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t start_addr;
    uint8_t *old_digest;

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;
    old_digest = ctx->digest;

    start_addr = rte_pktmbuf_iova(mbuf);
    /* output */
    sg = &cf->sg[0];
    qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
    sg->length = ses->digest_length;
    cpu_to_hw_sg(sg);

    /* input */
    sg = &cf->sg[1];
    if (is_decode(ses)) {
        /* need to extend the input to a compound frame */
        sg->extension = 1;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        sg->length = sym->auth.data.length + ses->digest_length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        /* hash result or digest, save digest first */
        rte_memcpy(old_digest, sym->auth.digest.data,
               ses->digest_length);
        qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        cpu_to_hw_sg(sg);

        /* let's check digest by hw */
        start_addr = dpaa_mem_vtop(old_digest);
        sg++;
        qm_sg_entry_set64(sg, start_addr);
        sg->length = ses->digest_length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    } else {
        qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    }

    return cf;
}
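
/*
 * All build_* helpers share the compound-frame convention visible above:
 * cf->sg[0] is the output entry CAAM writes through, cf->sg[1] is the
 * input entry, and whenever a side needs more than one fragment the entry
 * gets extension = 1 and points at a scatter-gather list starting at
 * cf->sg[2] inside the same op context.
 */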

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t req_segs;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    if (sym->m_dst) {
        mbuf = sym->m_dst;
        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
    } else {
        mbuf = sym->m_src;
        req_segs = mbuf->nb_segs * 2 + 3;
    }

    if (req_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                MAX_SG_ENTRIES);
        return NULL;
    }

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    /* output */
    out_sg = &cf->sg[0];
    out_sg->extension = 1;
    out_sg->length = sym->cipher.data.length;
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
    cpu_to_hw_sg(out_sg);

    /* 1st seg */
    sg = &cf->sg[2];
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->cipher.data.offset;
    sg->offset = sym->cipher.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    /* input */
    mbuf = sym->m_src;
    in_sg = &cf->sg[1];
    in_sg->extension = 1;
    in_sg->final = 1;
    in_sg->length = sym->cipher.data.length + ses->iv.length;

    sg++;
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    /* IV */
    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;
    cpu_to_hw_sg(sg);

    /* 1st seg */
    sg++;
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->cipher.data.offset;
    sg->offset = sym->cipher.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t src_start_addr, dst_start_addr;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    src_start_addr = rte_pktmbuf_iova(sym->m_src);

    if (sym->m_dst)
        dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
    else
        dst_start_addr = src_start_addr;

    /* output */
    sg = &cf->sg[0];
    qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length + ses->iv.length;
    cpu_to_hw_sg(sg);

    /* input */
    sg = &cf->sg[1];

    /* need to extend the input to a compound frame */
    sg->extension = 1;
    sg->final = 1;
    sg->length = sym->cipher.data.length + ses->iv.length;
    qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
    cpu_to_hw_sg(sg);

    sg = &cf->sg[2];
    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;
    cpu_to_hw_sg(sg);

    sg++;
    qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length;
    sg->final = 1;
    cpu_to_hw_sg(sg);

    return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t req_segs;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    if (sym->m_dst) {
        mbuf = sym->m_dst;
        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
    } else {
        mbuf = sym->m_src;
        req_segs = mbuf->nb_segs * 2 + 4;
    }

    if (ses->auth_only_len)
        req_segs++;

    if (req_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                MAX_SG_ENTRIES);
        return NULL;
    }

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    rte_prefetch0(cf->sg);

    /* output */
    out_sg = &cf->sg[0];
    out_sg->extension = 1;
    if (is_encode(ses))
        out_sg->length = sym->aead.data.length + ses->auth_only_len
                        + ses->digest_length;
    else
        out_sg->length = sym->aead.data.length + ses->auth_only_len;

    /* output sg entries */
    sg = &cf->sg[2];
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(out_sg);

    /* 1st seg */
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->aead.data.offset +
                 ses->auth_only_len;
    sg->offset = sym->aead.data.offset - ses->auth_only_len;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }
    sg->length -= ses->digest_length;

    if (is_encode(ses)) {
        cpu_to_hw_sg(sg);
        /* set auth output */
        sg++;
        qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
        sg->length = ses->digest_length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    /* input */
    mbuf = sym->m_src;
    in_sg = &cf->sg[1];
    in_sg->extension = 1;
    in_sg->final = 1;
    if (is_encode(ses))
        in_sg->length = ses->iv.length + sym->aead.data.length
                        + ses->auth_only_len;
    else
        in_sg->length = ses->iv.length + sym->aead.data.length
                        + ses->auth_only_len + ses->digest_length;

    /* input sg entries */
    sg++;
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    /* 1st seg IV */
    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;
    cpu_to_hw_sg(sg);

    /* 2nd seg auth only */
    if (ses->auth_only_len) {
        sg++;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
        sg->length = ses->auth_only_len;
        cpu_to_hw_sg(sg);
    }

    /* 3rd seg */
    sg++;
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->aead.data.offset;
    sg->offset = sym->aead.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }

    if (is_decode(ses)) {
        cpu_to_hw_sg(sg);
        sg++;
        memcpy(ctx->digest, sym->aead.digest.data,
               ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    uint32_t length = 0;
    rte_iova_t src_start_addr, dst_start_addr;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

    if (sym->m_dst)
        dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
    else
        dst_start_addr = src_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    /* input */
    rte_prefetch0(cf->sg);
    sg = &cf->sg[2];
    qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
    if (is_encode(ses)) {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        sg++;
        if (ses->auth_only_len) {
            qm_sg_entry_set64(sg,
                      dpaa_mem_vtop(sym->aead.aad.data));
            sg->length = ses->auth_only_len;
            length += sg->length;
            cpu_to_hw_sg(sg);
            sg++;
        }
        qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    } else {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        sg++;
        if (ses->auth_only_len) {
            qm_sg_entry_set64(sg,
                      dpaa_mem_vtop(sym->aead.aad.data));
            sg->length = ses->auth_only_len;
            length += sg->length;
            cpu_to_hw_sg(sg);
            sg++;
        }
        qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        memcpy(ctx->digest, sym->aead.digest.data,
               ses->digest_length);
        sg++;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
        length += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    }
    /* input compound frame */
    cf->sg[1].length = length;
    cf->sg[1].extension = 1;
    cf->sg[1].final = 1;
    cpu_to_hw_sg(&cf->sg[1]);

    /* output */
    sg++;
    qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
    qm_sg_entry_set64(sg,
        dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
    sg->length = sym->aead.data.length + ses->auth_only_len;
    length = sg->length;
    if (is_encode(ses)) {
        cpu_to_hw_sg(sg);
        /* set auth output */
        sg++;
        qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
        sg->length = ses->digest_length;
        length += sg->length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    /* output compound frame */
    cf->sg[0].length = length;
    cf->sg[0].extension = 1;
    cpu_to_hw_sg(&cf->sg[0]);

    return cf;
}
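
/*
 * For AEAD the AAD (sym->aead.aad.data) travels as its own input SG entry
 * of ses->auth_only_len bytes ahead of the payload, and the output window
 * is shifted back by the same amount - hence the output entry starting at
 * aead.data.offset - auth_only_len above.
 */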

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t req_segs;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    if (sym->m_dst) {
        mbuf = sym->m_dst;
        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
    } else {
        mbuf = sym->m_src;
        req_segs = mbuf->nb_segs * 2 + 4;
    }

    if (req_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                MAX_SG_ENTRIES);
        return NULL;
    }

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    rte_prefetch0(cf->sg);

    /* output */
    out_sg = &cf->sg[0];
    out_sg->extension = 1;
    if (is_encode(ses))
        out_sg->length = sym->auth.data.length + ses->digest_length;
    else
        out_sg->length = sym->auth.data.length;

    /* output sg entries */
    sg = &cf->sg[2];
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(out_sg);

    /* 1st seg */
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }
    sg->length -= ses->digest_length;

    if (is_encode(ses)) {
        cpu_to_hw_sg(sg);
        /* set auth output */
        sg++;
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    /* input */
    mbuf = sym->m_src;
    in_sg = &cf->sg[1];
    in_sg->extension = 1;
    in_sg->final = 1;
    if (is_encode(ses))
        in_sg->length = ses->iv.length + sym->auth.data.length;
    else
        in_sg->length = ses->iv.length + sym->auth.data.length
                        + ses->digest_length;

    /* input sg entries */
    sg++;
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    /* 1st seg IV */
    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;
    cpu_to_hw_sg(sg);

    /* 2nd seg */
    sg++;
    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
    mbuf = mbuf->next;
    while (mbuf) {
        cpu_to_hw_sg(sg);
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        mbuf = mbuf->next;
    }

    sg->length -= ses->digest_length;
    if (is_decode(ses)) {
        cpu_to_hw_sg(sg);
        sg++;
        memcpy(ctx->digest, sym->auth.digest.data,
               ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t src_start_addr, dst_start_addr;
    uint32_t length = 0;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
            ses->iv.offset);

    src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
    if (sym->m_dst)
        dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
    else
        dst_start_addr = src_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    /* input */
    rte_prefetch0(cf->sg);
    sg = &cf->sg[2];
    qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
    if (is_encode(ses)) {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        length += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    } else {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        sg++;

        qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        length += sg->length;
        cpu_to_hw_sg(sg);

        memcpy(ctx->digest, sym->auth.digest.data,
               ses->digest_length);
        sg++;

        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
        length += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);
    }
    /* input compound frame */
    cf->sg[1].length = length;
    cf->sg[1].extension = 1;
    cf->sg[1].final = 1;
    cpu_to_hw_sg(&cf->sg[1]);

    /* output */
    sg++;
    qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
    qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length;
    length = sg->length;
    if (is_encode(ses)) {
        cpu_to_hw_sg(sg);
        /* set auth output */
        sg++;
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        length += sg->length;
    }
    sg->final = 1;
    cpu_to_hw_sg(sg);

    /* output compound frame */
    cf->sg[0].length = length;
    cf->sg[0].extension = 1;
    cpu_to_hw_sg(&cf->sg[0]);

    return cf;
}

static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    phys_addr_t src_start_addr, dst_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses);
    if (!ctx)
        return NULL;

    cf = &ctx->job;
    ctx->op = op;

    src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

    if (sym->m_dst)
        dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
    else
        dst_start_addr = src_start_addr;

    /* input */
    sg = &cf->sg[1];
    qm_sg_entry_set64(sg, src_start_addr);
    sg->length = sym->m_src->pkt_len;
    sg->final = 1;
    cpu_to_hw_sg(sg);

    sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
    /* output */
    sg = &cf->sg[0];
    qm_sg_entry_set64(sg, dst_start_addr);
    sg->length = sym->m_src->buf_len - sym->m_src->data_off;
    cpu_to_hw_sg(sg);

    return cf;
}
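
/*
 * In protocol (lookaside) offload no SG walk is needed: the whole packet
 * is handed to CAAM as one input entry and the output entry exposes the
 * full remaining buffer (buf_len - data_off), since encapsulation or
 * decapsulation changes the frame size. The real output length is read
 * back from sg[0] at dequeue time.
 */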

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
               uint16_t nb_ops)
{
    /* Function to transmit the frames to the given device and queue pair */
    uint32_t loop;
    struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
    uint16_t num_tx = 0;
    struct qm_fd fds[DPAA_SEC_BURST], *fd;
    uint32_t frames_to_send;
    struct rte_crypto_op *op;
    struct dpaa_sec_job *cf;
    dpaa_sec_session *ses;
    uint32_t auth_only_len;
    struct qman_fq *inq[DPAA_SEC_BURST];

    while (nb_ops) {
        frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
                DPAA_SEC_BURST : nb_ops;
        for (loop = 0; loop < frames_to_send; loop++) {
            op = *(ops++);
            switch (op->sess_type) {
            case RTE_CRYPTO_OP_WITH_SESSION:
                ses = (dpaa_sec_session *)
                    get_sym_session_private_data(
                            op->sym->session,
                            cryptodev_driver_id);
                break;
            case RTE_CRYPTO_OP_SECURITY_SESSION:
                ses = (dpaa_sec_session *)
                    get_sec_session_private_data(
                            op->sym->sec_session);
                break;
            default:
                DPAA_SEC_DP_ERR(
                    "sessionless crypto op not supported");
                frames_to_send = loop;
                nb_ops = loop;
                goto send_pkts;
            }
            if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
                if (dpaa_sec_attach_sess_q(qp, ses)) {
                    frames_to_send = loop;
                    nb_ops = loop;
                    goto send_pkts;
                }
            } else if (unlikely(ses->qp[rte_lcore_id() %
                        MAX_DPAA_CORES] != qp)) {
                DPAA_SEC_DP_ERR("Old:sess->qp = %p"
                    " New qp = %p\n",
                    ses->qp[rte_lcore_id() %
                    MAX_DPAA_CORES], qp);
                frames_to_send = loop;
                nb_ops = loop;
                goto send_pkts;
            }

            auth_only_len = op->sym->auth.data.length -
                    op->sym->cipher.data.length;
            if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
                if (is_proto_ipsec(ses)) {
                    cf = build_proto(op, ses);
                } else if (is_proto_pdcp(ses)) {
                    cf = build_proto(op, ses);
                } else if (is_auth_only(ses)) {
                    cf = build_auth_only(op, ses);
                } else if (is_cipher_only(ses)) {
                    cf = build_cipher_only(op, ses);
                } else if (is_aead(ses)) {
                    cf = build_cipher_auth_gcm(op, ses);
                    auth_only_len = ses->auth_only_len;
                } else if (is_auth_cipher(ses)) {
                    cf = build_cipher_auth(op, ses);
                } else {
                    DPAA_SEC_DP_ERR("not supported ops");
                    frames_to_send = loop;
                    nb_ops = loop;
                    goto send_pkts;
                }
            } else {
                if (is_auth_only(ses)) {
                    cf = build_auth_only_sg(op, ses);
                } else if (is_cipher_only(ses)) {
                    cf = build_cipher_only_sg(op, ses);
                } else if (is_aead(ses)) {
                    cf = build_cipher_auth_gcm_sg(op, ses);
                    auth_only_len = ses->auth_only_len;
                } else if (is_auth_cipher(ses)) {
                    cf = build_cipher_auth_sg(op, ses);
                } else {
                    DPAA_SEC_DP_ERR("not supported ops");
                    frames_to_send = loop;
                    nb_ops = loop;
                    goto send_pkts;
                }
            }
            if (unlikely(!cf)) {
                frames_to_send = loop;
                nb_ops = loop;
                goto send_pkts;
            }

            fd = &fds[loop];
            inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
            fd->opaque_addr = 0;
            fd->cmd = 0;
            qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
            fd->_format1 = qm_fd_compound;
            fd->length29 = 2 * sizeof(struct qm_sg_entry);
            /* Auth_only_len is set as 0 in descriptor and it is
             * overwritten here in the fd.cmd which will update
             * the DPOVRD reg.
             */
            if (auth_only_len)
                fd->cmd = 0x80000000 | auth_only_len;
        }
send_pkts:
        loop = 0;
        while (loop < frames_to_send) {
            loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                    frames_to_send - loop);
        }
        nb_ops -= frames_to_send;
        num_tx += frames_to_send;
    }

    dpaa_qp->tx_pkts += num_tx;
    dpaa_qp->tx_errs += nb_ops - num_tx;

    return num_tx;
}
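
/*
 * Note that the enqueue path never blocks: any per-op failure truncates
 * the burst at the current index (frames_to_send = nb_ops = loop), only
 * the FDs prepared so far are pushed to QMan via qman_enqueue_multi_fq(),
 * and the caller sees the shortfall in the returned count.
 */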

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
               uint16_t nb_ops)
{
    uint16_t num_rx;
    struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

    num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

    dpaa_qp->rx_pkts += num_rx;
    dpaa_qp->rx_errs += nb_ops - num_rx;

    DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

    return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
                uint16_t qp_id)
{
    struct dpaa_sec_dev_private *internals;
    struct dpaa_sec_qp *qp = NULL;

    PMD_INIT_FUNC_TRACE();

    DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

    internals = dev->data->dev_private;
    if (qp_id >= internals->max_nb_queue_pairs) {
        DPAA_SEC_ERR("Max supported qpid %d",
                 internals->max_nb_queue_pairs);
        return -EINVAL;
    }

    qp = &internals->qps[qp_id];
    qp->internals = NULL;
    dev->data->queue_pairs[qp_id] = NULL;

    return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
        __rte_unused int socket_id)
{
    struct dpaa_sec_dev_private *internals;
    struct dpaa_sec_qp *qp = NULL;

    DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

    internals = dev->data->dev_private;
    if (qp_id >= internals->max_nb_queue_pairs) {
        DPAA_SEC_ERR("Max supported qpid %d",
                 internals->max_nb_queue_pairs);
        return -EINVAL;
    }

    qp = &internals->qps[qp_id];
    qp->internals = internals;
    dev->data->queue_pairs[qp_id] = qp;

    return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
    PMD_INIT_FUNC_TRACE();

    return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
    PMD_INIT_FUNC_TRACE();

    return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
             struct rte_crypto_sym_xform *xform,
             dpaa_sec_session *session)
{
    session->cipher_alg = xform->cipher.algo;
    session->iv.length = xform->cipher.iv.length;
    session->iv.offset = xform->cipher.iv.offset;
    session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
                           RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
        DPAA_SEC_ERR("No Memory for cipher key");
        return -ENOMEM;
    }
    session->cipher_key.length = xform->cipher.key.length;

    memcpy(session->cipher_key.data, xform->cipher.key.data,
           xform->cipher.key.length);
    session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
            DIR_ENC : DIR_DEC;

    return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
           struct rte_crypto_sym_xform *xform,
           dpaa_sec_session *session)
{
    session->auth_alg = xform->auth.algo;
    session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
        DPAA_SEC_ERR("No Memory for auth key");
        return -ENOMEM;
    }
    session->auth_key.length = xform->auth.key.length;
    session->digest_length = xform->auth.digest_length;

    memcpy(session->auth_key.data, xform->auth.key.data,
           xform->auth.key.length);
    session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
            DIR_ENC : DIR_DEC;

    return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
           struct rte_crypto_sym_xform *xform,
           dpaa_sec_session *session)
{
    session->aead_alg = xform->aead.algo;
    session->iv.length = xform->aead.iv.length;
    session->iv.offset = xform->aead.iv.offset;
    session->auth_only_len = xform->aead.aad_length;
    session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
        DPAA_SEC_ERR("No Memory for aead key");
        return -ENOMEM;
    }
    session->aead_key.length = xform->aead.key.length;
    session->digest_length = xform->aead.digest_length;

    memcpy(session->aead_key.data, xform->aead.key.data,
           xform->aead.key.length);
    session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
            DIR_ENC : DIR_DEC;

    return 0;
}

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
    unsigned int i;

    for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
        if (qi->inq_attach[i] == 0) {
            qi->inq_attach[i] = 1;
            return &qi->inq[i];
        }
    }
    DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

    return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
    unsigned int i;

    for (i = 0; i < qi->max_nb_sessions; i++) {
        if (&qi->inq[i] == fq) {
            qman_retire_fq(fq, NULL);
            qman_oos_fq(fq);
            qi->inq_attach[i] = 0;
            return 0;
        }
    }
    return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
    int ret;

    sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
    ret = dpaa_sec_prep_cdb(sess);
    if (ret) {
        DPAA_SEC_ERR("Unable to prepare sec cdb");
        return ret;
    }
    if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
            DPAA_SEC_ERR("Failure in affining portal");
            return ret;
        }
    }
    ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
                   dpaa_mem_vtop(&sess->cdb),
                   qman_fq_fqid(&qp->outq));
    if (ret)
        DPAA_SEC_ERR("Unable to init sec queue");

    return ret;
}
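
/*
 * Sessions are thus bound to hardware lazily: the first enqueue on a given
 * lcore lands in dpaa_sec_attach_sess_q(), which builds the CDB once and
 * schedules that lcore's per-session RX FQ with the CDB address in
 * CONTEXT_A and this queue pair's out-FQ in CONTEXT_B.
 */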

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform, void *sess)
{
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;
    dpaa_sec_session *session = sess;
    uint32_t i;

    PMD_INIT_FUNC_TRACE();

    if (unlikely(sess == NULL)) {
        DPAA_SEC_ERR("invalid session struct");
        return -EINVAL;
    }
    memset(session, 0, sizeof(dpaa_sec_session));

    /* Default IV length = 0 */
    session->iv.length = 0;

    /* Cipher Only */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
        dpaa_sec_cipher_init(dev, xform, session);

    /* Authentication Only */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next == NULL) {
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
        dpaa_sec_auth_init(dev, xform, session);

    /* Cipher then Authenticate */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
            dpaa_sec_cipher_init(dev, xform, session);
            dpaa_sec_auth_init(dev, xform->next, session);
        } else {
            DPAA_SEC_ERR("Not supported: Auth then Cipher");
            return -EINVAL;
        }

    /* Authenticate then Cipher */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
            dpaa_sec_auth_init(dev, xform, session);
            dpaa_sec_cipher_init(dev, xform->next, session);
        } else {
            DPAA_SEC_ERR("Not supported: Auth then Cipher");
            return -EINVAL;
        }

    /* AEAD operation for AES-GCM kind of Algorithms */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
           xform->next == NULL) {
        dpaa_sec_aead_init(dev, xform, session);

    } else {
        DPAA_SEC_ERR("Invalid crypto type");
        return -EINVAL;
    }
    session->ctx_pool = internals->ctx_pool;
    rte_spinlock_lock(&internals->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(internals);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&internals->lock);
            goto err1;
        }
    }
    rte_spinlock_unlock(&internals->lock);

    return 0;

err1:
    rte_free(session->cipher_key.data);
    rte_free(session->auth_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));

    return -EINVAL;
}

static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
                   struct rte_crypto_sym_xform *xform,
                   struct rte_cryptodev_sym_session *sess,
                   struct rte_mempool *mempool)
{
    void *sess_private_data;
    int ret;

    PMD_INIT_FUNC_TRACE();

    if (rte_mempool_get(mempool, &sess_private_data)) {
        DPAA_SEC_ERR("Couldn't get object from session mempool");
        return -ENOMEM;
    }

    ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
    if (ret != 0) {
        DPAA_SEC_ERR("failed to configure session parameters");

        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);
        return ret;
    }

    set_sym_session_private_data(sess, dev->driver_id,
            sess_private_data);

    return 0;
}

static void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
    struct dpaa_sec_dev_private *qi = dev->data->dev_private;
    struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
    uint8_t i;

    for (i = 0; i < MAX_DPAA_CORES; i++) {
        if (s->inq[i])
            dpaa_sec_detach_rxq(qi, s->inq[i]);
        s->inq[i] = NULL;
        s->qp[i] = NULL;
    }
    rte_free(s->cipher_key.data);
    rte_free(s->auth_key.data);
    memset(s, 0, sizeof(dpaa_sec_session));
    rte_mempool_put(sess_mp, (void *)s);
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
{
    PMD_INIT_FUNC_TRACE();
    uint8_t index = dev->driver_id;
    void *sess_priv = get_sym_session_private_data(sess, index);
    dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

    if (sess_priv) {
        free_session_memory(dev, s);
        set_sym_session_private_data(sess, index, NULL);
    }
}

static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
               struct rte_security_session_conf *conf,
               void *sess)
{
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;
    struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;
    dpaa_sec_session *session = (dpaa_sec_session *)sess;
    uint32_t i;

    PMD_INIT_FUNC_TRACE();

    memset(session, 0, sizeof(dpaa_sec_session));
    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        cipher_xform = &conf->crypto_xform->cipher;
        if (conf->crypto_xform->next)
            auth_xform = &conf->crypto_xform->next->auth;
    } else {
        auth_xform = &conf->crypto_xform->auth;
        if (conf->crypto_xform->next)
            cipher_xform = &conf->crypto_xform->next->cipher;
    }
    session->proto_alg = conf->protocol;

    if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
        session->cipher_key.data = rte_zmalloc(NULL,
                           cipher_xform->key.length,
                           RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL &&
                cipher_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for cipher key");
            return -ENOMEM;
        }
        memcpy(session->cipher_key.data, cipher_xform->key.data,
                cipher_xform->key.length);
        session->cipher_key.length = cipher_xform->key.length;

        switch (cipher_xform->algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
        case RTE_CRYPTO_CIPHER_3DES_CBC:
        case RTE_CRYPTO_CIPHER_AES_CTR:
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                cipher_xform->algo);
            goto out;
        }
        session->cipher_alg = cipher_xform->algo;
    } else {
        session->cipher_key.data = NULL;
        session->cipher_key.length = 0;
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
    }

    if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
        session->auth_key.data = rte_zmalloc(NULL,
                        auth_xform->key.length,
                        RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL &&
                auth_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for auth key");
            rte_free(session->cipher_key.data);
            return -ENOMEM;
        }
        memcpy(session->auth_key.data, auth_xform->key.data,
                auth_xform->key.length);
        session->auth_key.length = auth_xform->key.length;

        switch (auth_xform->algo) {
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
        case RTE_CRYPTO_AUTH_MD5_HMAC:
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
        case RTE_CRYPTO_AUTH_AES_CMAC:
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                auth_xform->algo);
            goto out;
        }
        session->auth_alg = auth_xform->algo;
    } else {
        session->auth_key.data = NULL;
        session->auth_key.length = 0;
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
    }

    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_v = IPVERSION;
            session->ip4_hdr.ip_hl = 5;
            session->ip4_hdr.ip_len = rte_cpu_to_be_16(
                        sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
            session->ip4_hdr.ip_id = 0;
            session->ip4_hdr.ip_off = 0;
            session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
            session->ip4_hdr.ip_p = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            session->ip4_hdr.ip_sum = 0;
            session->ip4_hdr.ip_src =
                    ipsec_xform->tunnel.ipv4.src_ip;
            session->ip4_hdr.ip_dst =
                    ipsec_xform->tunnel.ipv4.dst_ip;
            session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
                        (void *)&session->ip4_hdr,
                        sizeof(struct ip));
            session->encap_pdb.ip_hdr_len = sizeof(struct ip);
        } else if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip6_hdr));
            session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
                DPAA_IPv6_DEFAULT_VTC_FLOW |
                ((ipsec_xform->tunnel.ipv6.dscp <<
                    RTE_IPV6_HDR_TC_SHIFT) &
                    RTE_IPV6_HDR_TC_MASK) |
                ((ipsec_xform->tunnel.ipv6.flabel <<
                    RTE_IPV6_HDR_FL_SHIFT) &
                    RTE_IPV6_HDR_FL_MASK));
            /* Payload length will be updated by HW */
            session->ip6_hdr.payload_len = 0;
            session->ip6_hdr.hop_limits =
                    ipsec_xform->tunnel.ipv6.hlimit;
            session->ip6_hdr.proto = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            memcpy(&session->ip6_hdr.src_addr,
                   &ipsec_xform->tunnel.ipv6.src_addr, 16);
            memcpy(&session->ip6_hdr.dst_addr,
                   &ipsec_xform->tunnel.ipv6.dst_addr, 16);
            session->encap_pdb.ip_hdr_len =
                    sizeof(struct rte_ipv6_hdr);
        }
        session->encap_pdb.options =
            (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
            PDBOPTS_ESP_OIHI_PDB_INL |
            PDBOPTS_ESP_IVSRC |
            PDBHMO_ESP_ENCAP_DTTL |
            PDBHMO_ESP_SNR;
        if (ipsec_xform->options.esn)
            session->encap_pdb.options |= PDBOPTS_ESP_ESN;
        session->encap_pdb.spi = ipsec_xform->spi;
        session->dir = DIR_ENC;
    } else if (ipsec_xform->direction ==
            RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
        memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
        if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
            session->decap_pdb.options = sizeof(struct ip) << 16;
        else
            session->decap_pdb.options =
                    sizeof(struct rte_ipv6_hdr) << 16;
        if (ipsec_xform->options.esn)
            session->decap_pdb.options |= PDBOPTS_ESP_ESN;
        session->dir = DIR_DEC;
    } else
        goto out;
    session->ctx_pool = internals->ctx_pool;
    rte_spinlock_lock(&internals->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(internals);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&internals->lock);
            goto out;
        }
    }
    rte_spinlock_unlock(&internals->lock);

    return 0;
out:
    rte_free(session->auth_key.data);
    rte_free(session->cipher_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));
    return -1;
}

static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
              struct rte_security_session_conf *conf,
              void *sess)
{
    struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
    struct rte_crypto_sym_xform *xform = conf->crypto_xform;
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;
    dpaa_sec_session *session = (dpaa_sec_session *)sess;
    struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
    uint32_t i;

    PMD_INIT_FUNC_TRACE();

    memset(session, 0, sizeof(dpaa_sec_session));

    /* find xfrm types */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        cipher_xform = &xform->cipher;
        if (xform->next != NULL)
            auth_xform = &xform->next->auth;
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        auth_xform = &xform->auth;
        if (xform->next != NULL)
            cipher_xform = &xform->next->cipher;
    } else {
        DPAA_SEC_ERR("Invalid crypto type");
        return -EINVAL;
    }

    session->proto_alg = conf->protocol;
    if (cipher_xform) {
        session->cipher_key.data = rte_zmalloc(NULL,
                           cipher_xform->key.length,
                           RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL &&
                cipher_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for cipher key");
            return -ENOMEM;
        }
        session->cipher_key.length = cipher_xform->key.length;
        memcpy(session->cipher_key.data, cipher_xform->key.data,
            cipher_xform->key.length);
        session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                    DIR_ENC : DIR_DEC;
        session->cipher_alg = cipher_xform->algo;
    } else {
        session->cipher_key.data = NULL;
        session->cipher_key.length = 0;
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
        session->dir = DIR_ENC;
    }

    if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
        if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
            pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
            DPAA_SEC_ERR(
                "PDCP Seq Num size should be 5/12 bits for cmode");
            goto out;
        }
    }

    if (auth_xform) {
        session->auth_key.data = rte_zmalloc(NULL,
                             auth_xform->key.length,
                             RTE_CACHE_LINE_SIZE);
        if (!session->auth_key.data &&
                auth_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for auth key");
            rte_free(session->cipher_key.data);
            return -ENOMEM;
        }
        session->auth_key.length = auth_xform->key.length;
        memcpy(session->auth_key.data, auth_xform->key.data,
                auth_xform->key.length);
        session->auth_alg = auth_xform->algo;
    } else {
        session->auth_key.data = NULL;
        session->auth_key.length = 0;
        session->auth_alg = 0;
    }
    session->pdcp.domain = pdcp_xform->domain;
    session->pdcp.bearer = pdcp_xform->bearer;
    session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
    session->pdcp.sn_size = pdcp_xform->sn_size;
#ifdef ENABLE_HFN_OVERRIDE
    session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
#endif
    session->pdcp.hfn = pdcp_xform->hfn;
    session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;

    session->ctx_pool = dev_priv->ctx_pool;
    rte_spinlock_lock(&dev_priv->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&dev_priv->lock);
            goto out;
        }
    }
    rte_spinlock_unlock(&dev_priv->lock);

    return 0;
out:
    rte_free(session->auth_key.data);
    rte_free(session->cipher_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));

    return -1;
}

static int
dpaa_sec_security_session_create(void *dev,
                 struct rte_security_session_conf *conf,
                 struct rte_security_session *sess,
                 struct rte_mempool *mempool)
{
    void *sess_private_data;
    struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
    int ret;

    if (rte_mempool_get(mempool, &sess_private_data)) {
        DPAA_SEC_ERR("Couldn't get object from session mempool");
        return -ENOMEM;
    }

    switch (conf->protocol) {
    case RTE_SECURITY_PROTOCOL_IPSEC:
        ret = dpaa_sec_set_ipsec_session(cdev, conf,
                sess_private_data);
        break;
    case RTE_SECURITY_PROTOCOL_PDCP:
        ret = dpaa_sec_set_pdcp_session(cdev, conf,
                sess_private_data);
        break;
    case RTE_SECURITY_PROTOCOL_MACSEC:
        return -ENOTSUP;
    default:
        return -EINVAL;
    }
    if (ret != 0) {
        DPAA_SEC_ERR("failed to configure session parameters");
        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);
        return ret;
    }

    set_sec_session_private_data(sess, sess_private_data);

    return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
        struct rte_security_session *sess)
{
    PMD_INIT_FUNC_TRACE();
    void *sess_priv = get_sec_session_private_data(sess);
    dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

    if (sess_priv) {
        free_session_memory((struct rte_cryptodev *)dev, s);
        set_sec_session_private_data(sess, NULL);
    }
    return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
{
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_NUM_BUFS,
					CTX_POOL_BUF_SIZE,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					SOCKET_ID_ANY, 0);
		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed", str);
			return -ENOMEM;
		}
	} else {
		DPAA_SEC_INFO("mempool already created for dev_id : %d",
				dev->data->dev_id);
	}

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
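
/*
 * Illustrative sketch (not compiled): applications consume the fields filled
 * in above via rte_cryptodev_info_get(). dev_id is a placeholder.
 */
#if 0	/* example only */
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	/* info.max_nb_queue_pairs, info.capabilities, etc. are now valid */
#endif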

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
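
/*
 * Illustrative sketch (not compiled): how an application reaches these ops
 * through the generic rte_security API, assuming dev_id identifies this
 * PMD's cryptodev.
 */
#if 0	/* example only */
	struct rte_security_ctx *sec_ctx =
		rte_cryptodev_get_sec_ctx(dev_id);
	const struct rte_security_capability *caps =
		rte_security_capabilities_get(sec_ctx); /* dpaa_sec_security_cap */
#endif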

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has already been called, ctx_pool is NULL and
	 * rte_mempool_free() is a no-op.
	 */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check that we don't need a
	 * different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
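
/*
 * Illustrative sketch (not compiled): once the device is initialised and a
 * queue pair is set up, an application drives the enqueue/dequeue hooks
 * installed above through the generic burst API. dev_id, qp_id and the
 * contents of ops[] are placeholders supplied by the application.
 */
#if 0	/* example only */
	struct rte_crypto_op *ops[32];
	uint16_t nb_enq, nb_deq;

	/* ... fill ops[] with prepared symmetric crypto operations ... */
	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
#endif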

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured, read it from the
	 * device tree
	 */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
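
/*
 * Note: with the "pmd.crypto.dpaa" logtype registered above, per-PMD log
 * verbosity can be raised at run time from the EAL command line, e.g.
 * something like:
 *
 *	--log-level=pmd.crypto.dpaa,8	(8 == RTE_LOG_DEBUG)
 *
 * The exact option syntax depends on the DPDK version in use.
 */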