/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
	struct dpaa_sec_op_ctx *ctx;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t) ctx
				- rte_mempool_virt2iova(ctx);
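
/*
 * Illustrative helper (not part of the original driver): vtop_offset
 * caches the virtual-to-IOVA delta of the context object, so any
 * address inside the same mempool element can be translated back to
 * an IOVA without another lookup. A hypothetical use looks like:
 */
static inline rte_iova_t
dpaa_sec_ctx_vtop(const struct dpaa_sec_op_ctx *ctx, const void *vaddr)
{
	/* valid only for addresses inside the ctx mempool element */
	return (rte_iova_t)((size_t)vaddr - ctx->vtop_offset);
}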
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);

dpaa_mem_ptov(rte_iova_t paddr)
	va = (void *)dpaax_iova_table_get_va(paddr);

	return rte_mem_iova2virt(paddr);
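
/*
 * Minimal usage sketch (illustrative, not in the original source):
 * dpaa_mem_vtop()/dpaa_mem_ptov() translate between the virtual
 * addresses the PMD works with and the IOVAs the SEC block consumes;
 * for driver-owned memory the round trip should be the identity.
 */
static inline int
dpaa_sec_addr_roundtrip_ok(void *vaddr)
{
	return dpaa_mem_ptov(dpaa_mem_vtop(vaddr)) == vaddr;
}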
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		const struct qm_mr_entry *msg)
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);

/* Initialize the queue with the CAAM channel as destination so that
 * all packets in this queue are dispatched to CAAM.
 */
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
	struct qm_mcc_initfq fq_opts;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

/* Frames are enqueued on in_fq, and CAAM puts the crypto result on out_fq. */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;

	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
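
/*
 * Sketch of the FD-to-context recovery used in the callback above
 * (illustrative, not part of the original file): the hardware returns
 * only the IOVA of the SG table embedded in struct dpaa_sec_op_ctx,
 * so the context is recovered with dpaa_mem_ptov() plus container_of().
 */
static inline struct dpaa_sec_op_ctx *
dpaa_sec_fd_to_ctx(const struct qm_fd *fd)
{
	struct dpaa_sec_job *job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	return container_of(job, struct dpaa_sec_op_ctx, job);
}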
/* caam result is put into this queue */
dpaa_sec_init_tx(struct qman_fq *fq)
	struct qm_mcc_initfq opts;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
		DPAA_SEC_ERR("qman_create_fq failed");

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
		DPAA_SEC_ERR("unable to init caam source fq!");

static inline int is_cipher_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));

static inline int is_auth_only(dpaa_sec_session *ses)
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));

static inline int is_aead(dpaa_sec_session *ses)
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));

static inline int is_auth_cipher(dpaa_sec_session *ses)
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));

static inline int is_proto_ipsec(dpaa_sec_session *ses)
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);

static inline int is_proto_pdcp(dpaa_sec_session *ses)
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);

static inline int is_encode(dpaa_sec_session *ses)
	return ses->dir == DIR_ENC;

static inline int is_decode(dpaa_sec_session *ses)
	return ses->dir == DIR_DEC;

caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);

caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
	case RTE_CRYPTO_CIPHER_AES_CBC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);

caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);

dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = cipherdata.keylen;
		cdb->sh_desc[1] = authdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		if (!(cdb->sh_desc[2] & (1<<1)) && authdata.keylen) {
			authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
		cdb->sh_desc[0] = cipherdata.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 1);

			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
			cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,

	return shared_desc_len;
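
/*
 * Illustrative note on the rta_inline_query() pattern above (not part
 * of the original file): bit n of the mask returned in sh_desc[2] is
 * set when key n fits inline in the shared descriptor; a cleared bit
 * means the key must be referenced by pointer, which is why the code
 * converts such keys to IOVAs and switches key_type to RTA_DATA_PTR.
 */
static inline int
dpaa_sec_key_is_inline(uint32_t inl_mask, unsigned int key_idx)
{
	return !!(inl_mask & (1U << key_idx));
}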
/* prepare ipsec proto command block of the session */
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				true, swap, SHR_SERIAL,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				true, swap, SHR_SERIAL,
				&cipherdata, &authdata);
	return shared_desc_len;

/* prepare command block of the session */
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
					swap, SHR_NEVER, &alginfo_c,
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->digest_length, ses->dir);

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/* qp is lockless; it should be accessed by only one thread */
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	/*
	 * For requests of up to four buffers, we dequeue exactly the
	 * requested number (QM_VDQCR_EXACT). For larger requests we do not
	 * set the QM_VDQCR_EXACT flag, in which case the dequeue may return
	 * up to two more buffers than requested, so we request two fewer.
	 */
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);

		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);

		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;

		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
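
/*
 * Worked sketch of the volatile-dequeue sizing above (illustrative;
 * the "< 4" threshold is taken from the comment, the surrounding
 * branch is elided here): e.g. nb_ops = 10 drops the EXACT flag and
 * requests 8 frames, since the hardware may return up to two extra.
 */
static inline int
dpaa_sec_vdq_count(int nb_ops, uint32_t *vdqcr_flags)
{
	if (nb_ops < 4) {
		*vdqcr_flags = QM_VDQCR_EXACT;
		return nb_ops;
	}
	*vdqcr_flags = 0;
	return nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
		(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
}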
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
	ctx = dpaa_sec_alloc_ctx(ses);

	old_digest = ctx->digest;

	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		rte_memcpy(old_digest, sym->auth.digest.data,
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
		/* Digest calculation case */
		sg->length -= ses->digest_length;

/*
 *            |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;

	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;

		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
		req_segs = mbuf->nb_segs * 2 + 3;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
	ctx = dpaa_sec_alloc_ctx(ses);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	in_sg->extension = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	ctx = dpaa_sec_alloc_ctx(ses);

	src_start_addr = rte_pktmbuf_iova(sym->m_src);
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
		dst_start_addr = src_start_addr;

	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;

	/* need to extend the input to a compound frame */
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
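
/*
 * Illustrative accessors (not part of the original file) documenting
 * the compound-frame convention the build_*() helpers rely on:
 * cf->sg[0] is the output entry and cf->sg[1] the input entry, with
 * cf->sg[2..] holding the actual fragments when extension is set.
 */
static inline struct qm_sg_entry *
dpaa_sec_job_out_sg(struct dpaa_sec_job *cf)
{
	return &cf->sg[0];	/* output side of the compound frame */
}

static inline struct qm_sg_entry *
dpaa_sec_job_in_sg(struct dpaa_sec_job *cf)
{
	return &cf->sg[1];	/* input side of the compound frame */
}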
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;
	if (ses->auth_only_len)

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->aead.data.length + ses->auth_only_len
			+ ses->digest_length;
	out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
	in_sg->length = ses->iv.length + sym->aead.data.length
			+ ses->auth_only_len;
	in_sg->length = ses->iv.length + sym->aead.data.length
			+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
		req_segs = mbuf->nb_segs * 2 + 4;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->auth.data.length + ses->digest_length;
	out_sg->length = sym->auth.data.length;

	/* output sg entries */
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;

	in_sg->extension = 1;
	in_sg->length = ses->iv.length + sym->auth.data.length;
	in_sg->length = ses->iv.length + sym->auth.data.length
			+ ses->digest_length;

	/* input sg entries */
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	rte_prefetch0(cf->sg);

	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;

	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		/* set auth output */
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
		dst_start_addr = src_start_addr;

	/* input */
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	/* output */
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
	/* Function to transmit the frames to given device and queuepair */
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
						cryptodev_driver_id);
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
					"sessionless crypto op not supported");
				frames_to_send = loop;

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES], qp);
				frames_to_send = loop;

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;

				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;

			if (unlikely(!cf)) {
				frames_to_send = loop;

			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in the descriptor and is
			 * overwritten here in fd.cmd for each packet.
			 */
			fd->cmd = 0x80000000 | auth_only_len;

		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;
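
/*
 * Illustrative encoding of the FD command word written above (an
 * interpretation, not taken from the original source): bit 31 flags
 * the override word as valid and the low bits carry the per-packet
 * auth-only length, so auth_only_len = 12 gives 0x8000000c.
 */
static inline uint32_t
dpaa_sec_fd_cmd(uint32_t auth_only_len)
{
	return 0x80000000 | auth_only_len;
}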
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
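
/*
 * Application-level usage sketch (illustrative; "dev_id", "qp_id" and
 * "ops" are caller-provided, not driver state): operations flow in
 * through the enqueue burst above and are polled back out with the
 * dequeue burst.
 */
static inline uint16_t
dpaa_sec_process_burst(uint8_t dev_id, uint16_t qp_id,
		       struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent, done = 0;

	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
						    &ops[done], sent - done);
	return done;
}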
/** Release queue pair */
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

/** Setup a queue pair */
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

/** Return the number of allocated queue pairs */
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;

/** Returns the size of session structure */
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);

dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?

dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?

dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
		DPAA_SEC_ERR("Unable to prepare sec cdb");
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
			DPAA_SEC_ERR("Failure in affining portal");
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
		DPAA_SEC_ERR("Unable to init sec queue");

dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
			DPAA_SEC_ERR("Not supported: Auth then Cipher");

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
			DPAA_SEC_ERR("Not supported: Auth then Cipher");

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);
		DPAA_SEC_ERR("Invalid crypto type");

	session->ctx_pool = internals->ctx_pool;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
	rte_spinlock_unlock(&internals->lock);

	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
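
/*
 * Illustrative xform chain accepted by the "Cipher then Authenticate"
 * branch above (encrypt direction); algorithm choices are examples
 * only, not mandated by the original source.
 */
static inline void
dpaa_sec_example_chain(struct rte_crypto_sym_xform *cipher,
		       struct rte_crypto_sym_xform *auth)
{
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}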
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
	void *sess_private_data;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sym_session_private_data(sess, dev->driver_id,

/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id, i;
	void *sess_priv = get_sym_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		for (i = 0; i < MAX_DPAA_CORES; i++) {
				dpaa_sec_detach_rxq(qi, s->inq[i]);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);

dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
	session->proto_alg = conf->protocol;

	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_key.length = cipher_xform->key.length;

		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
				     cipher_xform->algo);
		session->cipher_alg = cipher_xform->algo;
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;

	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
		session->auth_alg = auth_xform->algo;
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBHMO_ESP_ENCAP_DTTL |
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;

	session->ctx_pool = internals->ctx_pool;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
	rte_spinlock_unlock(&internals->lock);

	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
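
/*
 * Illustrative egress rte_security session_conf for the IPsec path
 * above (field values are examples only, not from the original
 * source).
 */
static inline void
dpaa_sec_example_ipsec_conf(struct rte_security_session_conf *conf,
			    struct rte_crypto_sym_xform *crypto_xform)
{
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.spi = 0x1234;	/* example SPI */
	conf->crypto_xform = crypto_xform;
}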
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
		DPAA_SEC_ERR("Invalid crypto type");

	session->proto_alg = conf->protocol;

		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
		session->cipher_alg = cipher_xform->algo;
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;

	/* Auth is only applicable for control mode operation. */
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
				"PDCP Seq Num size should be 5 bits for cmode");
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
#ifdef ENABLE_HFN_OVERRIDE
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;

	session->ctx_pool = dev_priv->ctx_pool;
	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
	rte_spinlock_unlock(&dev_priv->lock);

	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

dpaa_sec_security_session_create(void *dev,
		struct rte_security_session_conf *conf,
		struct rte_security_session *sess,
		struct rte_mempool *mempool)
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
	case RTE_SECURITY_PROTOCOL_MACSEC:

		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);

	set_sec_session_private_data(sess, sess_private_data);

/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
		DPAA_SEC_INFO("mempool already created for dev_id : %d",

dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
	PMD_INIT_FUNC_TRACE();

dpaa_sec_dev_close(struct rte_cryptodev *dev)
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
	return dpaa_sec_security_cap;

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get

dpaa_sec_uninit(struct rte_cryptodev *dev)
	struct dpaa_sec_dev_private *internals;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
			DPAA_SEC_ERR("config tx of queue pair %d", i);

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);

	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);

cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
	struct rte_cryptodev *cryptodev;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)

	ret = dpaa_sec_uninit(cryptodev);

	return rte_cryptodev_pmd_destroy(cryptodev);

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
		.name = "DPAA SEC PMD"
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);