/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
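
/* Completion handler: propagate the per-frame SEC status into the
 * crypto op and return the op context to its mempool. A non-zero
 * fd_status means CAAM reported an error for this frame.
 */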
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
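
/* Allocate a per-op context (job descriptor + SG table) from the
 * current lcore's queue-pair context pool. sg_count is the number of
 * SG entries the caller intends to use; only those entries are
 * zeroed, to keep the per-packet cost low.
 */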
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four
	 * times to clear all the SG entries. dpaa_sec_alloc_ctx() is
	 * called for each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
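
/* Virtual/IOVA translation helpers. dpaa_mem_vtop() resolves the
 * memseg backing a virtual address and feeds the DPAAX IOVA table, so
 * that the reverse dpaa_mem_ptov() lookup is a fast table hit and only
 * falls back to rte_mem_iova2virt() on a miss.
 */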
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		struct qman_fq *fq,
		const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched to CAAM
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* frames enqueued on the in_fq are processed by CAAM, which puts the
 * crypto result on the out_fq
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		struct qman_fq *fq __always_unused,
		const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* CAAM puts the crypto result into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
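
/* Session classification helpers: the build_* and command-block
 * preparation paths below are selected based on which of the
 * cipher/auth/aead/proto fields are populated in the session.
 */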
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}
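
/* Prepare the PDCP protocol-offload shared descriptor of the session.
 * Keys are referenced immediately (RTA_DATA_IMM) when they fit in the
 * descriptor, as reported by rta_inline_query(), and by pointer
 * (RTA_DATA_PTR) otherwise.
 */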
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     ses->cipher_alg);
		return -1;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, SHR_NEVER, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers and set the QM_VDQCR_EXACT flag. Otherwise we
	 * do not set the flag; an inexact dequeue can return up to two more
	 * buffers than requested, so we request two less in this case
	 * (e.g. nb_ops = 32 requests 30 frames, which may yield up to 32).
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output,
		 * sg[1] is for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
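
/* Build a compound frame for an auth-only op on a scatter-gather
 * mbuf: sg[0] is the digest output, sg[1] is an extension entry
 * pointing to the input chain. For decode (verify), the received
 * digest is copied into the context and appended to the input so
 * that hardware performs the check.
 */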
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 * packet looks like:
 *		|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 *		^
 *		|
 *	mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let hardware check the digest */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
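
/* Scatter-gather variant of the cipher-only path: output chain first
 * (cf->sg[0] extension entry), then the input chain prefixed with
 * the IV.
 */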
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
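
/* AEAD (AES-GCM) scatter-gather path. The input chain carries the IV,
 * the optional AAD (auth_only_len) and the payload; on decode the
 * received digest is appended so hardware can verify it, on encode
 * the digest is produced into the output chain.
 */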
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
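
/* Chained cipher+auth scatter-gather path (non-AEAD algorithms):
 * same compound-frame layout as the GCM variant, but the IV is the
 * only per-op item prefixed to the input chain.
 */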
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
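
/* Protocol-offload (IPsec/PDCP) path for contiguous mbufs: the whole
 * packet is handed to the SEC engine, so the input is simply pkt_len
 * bytes from m_src and the output is the remaining room in the
 * destination buffer.
 */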
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	mbuf = sym->m_src;
	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
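
/* Burst enqueue: ops are translated into compound frame descriptors
 * (build_* helpers above) and pushed to the session's CAAM input
 * frame queues, at most DPAA_SEC_BURST frames per iteration. On any
 * per-op failure the burst is truncated and whatever was already
 * prepared is still sent.
 */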
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
					cf = build_proto_sg(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in the descriptor and it
			 * is overwritten here in the fd.cmd, which will update
			 * the DPOVRD reg.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;

			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd,
					is_proto_pdcp(ses));
			}
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
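
/* Rx queue (CAAM input FQ) bookkeeping: each session grabs one
 * pre-created in-queue per lcore from the device-private pool and
 * releases it when the session is freed.
 */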
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
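
/* Translate a symmetric crypto xform chain into a dpaa_sec_session.
 * Supported chains: cipher-only, auth-only, cipher-then-auth
 * (encrypt), auth-then-cipher (decrypt) and single AEAD xforms.
 */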
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
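
/* Fill a session for IPsec protocol offload: algorithm selection and
 * keys from the crypto xform, plus the encap PDB (with a prebuilt
 * IPv4/IPv6 tunnel header) for egress or the decap PDB for ingress.
 */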
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;

	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_key.length = cipher_xform->key.length;

		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
				cipher_xform->algo);
			goto out;
		}
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			goto out;
		}
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
	} else
		goto out;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
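
/* Fill a session for PDCP protocol offload: cipher/auth keys, the
 * PDCP xform attributes (domain, bearer, direction, SN size, HFN and
 * threshold), and the per-packet HFN override offset.
 */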
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op ==
					RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* Guard against a NULL cipher_xform (auth-only session) before
	 * dereferencing it for the HFN-override IV offset.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
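/*
 * Illustrative sketch (not part of the driver): a minimal PDCP control-plane
 * session configuration satisfying the SN-size check above. The bearer and
 * HFN values, and the referenced "cipher_xform", are placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.bearer = 0x3,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */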
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);
	return ret;
}
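/*
 * Illustrative usage (not part of the driver): applications reach the
 * callback above through the rte_security API; "dev_id", "conf" and
 * "sess_mp" are assumed to be set up elsewhere.
 *
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *			rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *			rte_security_session_create(ctx, &conf, sess_mp);
 *	if (sec_sess == NULL)
 *		rte_exit(EXIT_FAILURE, "security session creation failed\n");
 */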
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
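/*
 * Illustrative usage (not part of the driver): querying these limits from an
 * application; "dev_id" is assumed to be a valid cryptodev identifier.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	printf("qps=%u sessions=%u\n", info.max_nb_queue_pairs,
 *	       info.sym.max_nb_sessions);
 */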
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
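/*
 * Illustrative usage (not part of the driver): the ops above are invoked
 * through the public cryptodev API; "dev_id" and "nb_qps" are placeholders.
 *
 *	struct rte_cryptodev_config dev_conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = nb_qps,
 *	};
 *
 *	if (rte_cryptodev_configure(dev_id, &dev_conf) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev setup failed\n");
 */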
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());
	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);
	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
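/*
 * Illustrative usage (not part of the driver): the default level registered
 * above is NOTICE; more verbose PMD logs can be requested at startup with an
 * EAL option such as:
 *
 *	./app -l 0-1 --log-level=pmd.crypto.dpaa,8
 */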