/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
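/* Note: dpaa_sec_ops/dpaa_sec_op_nb are per-thread staging state; the
 * DQRR callback below parks completed ops here (bounded by
 * DPAA_SEC_BURST) until the polling thread collects them.
 */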
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
	 * for each packet, memset() would be costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
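/* vtop_offset caches the virtual-address-to-IOVA delta of the ctx object
 * itself, so addresses inside the ctx (e.g. the scratch digest buffer)
 * can be translated by simple subtraction on the fast path.
 */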
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
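/* ptov first consults the dpaax IOVA table (a fast lookup, populated by
 * dpaa_mem_vtop() above) and only falls back to the slower generic
 * rte_mem_iova2virt() walk when the table has no entry.
 */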
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with dest chan as the CAAM chan so that
 * all the packets in this queue could be dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
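/* FQ wiring: context A of the input FQ carries the IOVA of the session's
 * shared descriptor (the CDB prepared in dpaa_sec_prep_cdb()), and
 * context B carries the FQID of the response queue, so CAAM knows where
 * to enqueue the processed frame.
 */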
/* Something is put into in_fq and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* CAAM result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_proto_pdcp(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
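/* These predicates partition a session into exactly one of the job
 * construction paths used by the enqueue logic further below: protocol
 * offload (IPsec/PDCP), cipher-only, auth-only, AEAD, or chained
 * cipher+auth.
 */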
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_NULL : 0;
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_NULL : 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     ses->cipher_alg);
		return -1;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     ses->auth_alg);
			return -1;
		}

		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}
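/* rta_inline_query() decides which keys fit inline in the shared
 * descriptor: bit 0 of sh_desc[2] is set when the cipher key can stay
 * immediate, bit 1 when the auth key can; keys that do not fit are
 * passed by reference (RTA_DATA_PTR) instead.
 */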
/* Prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	caam_cipher_alg(ses, &cipherdata);
	if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported cipher alg");
		return -ENOTSUP;
	}

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	caam_auth_alg(ses, &authdata);
	if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
		DPAA_SEC_ERR("not supported auth alg");
		return -ENOTSUP;
	}

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* Prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_proto_ipsec(ses)) {
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
	} else if (is_proto_pdcp(ses)) {
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
	} else if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, SHR_NEVER, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length, ses->auth_only_len,
				ses->digest_length, ses->dir);
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
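/* The shared-descriptor header words are stored big-endian because the
 * CAAM engine reads the CDB directly from memory; idlen records the
 * descriptor length returned by the RTA constructors above.
 */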
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
	 * flag and ask for the exact number. Otherwise leave QM_VDQCR_EXACT
	 * unset; the portal may then return up to two more buffers than
	 * requested, so request two less to compensate.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
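/* Example of the sizing rule above: for nb_ops = 32 the driver requests
 * 30 frames without QM_VDQCR_EXACT, so the portal can legally hand back
 * 30, 31 or 32 frames; for nb_ops = 3 it requests exactly 3.
 */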
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/**
 * packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 *		^
 *		|
 *		mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
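/* Compound frame layout used by all the build_* helpers: cf->sg[0] is
 * the output entry and cf->sg[1] the input entry; extension entries from
 * cf->sg[2] onwards carry the actual scatter-gather list. For digest
 * verification the expected ICV is copied to ctx->digest so that CAAM
 * itself performs the comparison.
 */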
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
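/* GCM note: auth_only_len here is the AAD length. On encrypt the output
 * covers AAD + ciphertext followed by the generated tag; on decrypt the
 * expected tag is appended to the input so CAAM verifies it in-flight.
 */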
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
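/* For protocol offload the whole packet is handed to CAAM: the input
 * covers the current packet data, while the output entry exposes the
 * full remaining buffer (buf_len - data_off) because encapsulation grows
 * the frame; the final length is read back from sg[0] at dequeue time.
 */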
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else if (is_proto_pdcp(ses)) {
					cf = build_proto(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
					cf = build_proto_sg(op, ses);
				} else if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;

			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd,
					is_proto_pdcp(ses));
			}
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
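/* Usage sketch (hedged, not part of the driver): applications reach
 * these burst hooks through the generic cryptodev API, e.g.
 *
 *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb);
 *
 * where qp_id selects the dpaa_sec_qp configured via queue_pair_setup().
 */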
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
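/* Sessions are bound to a queue pair lazily: the first enqueue on an
 * lcore calls this helper, which programs the per-lcore RX FQ with the
 * session's CDB address and the qp's output FQID (see dpaa_sec_init_rx()).
 */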
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,	void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;

	if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_key.length = cipher_xform->key.length;

		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
				     cipher_xform->algo);
			goto out;
		}
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			goto out;
		}
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
	} else
		goto out;

	session->ctx_pool = internals->ctx_pool;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* The HFN override offset comes from the cipher IV offset; guard
	 * against an auth-only transform chain where cipher_xform is NULL.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	session->ctx_pool = dev_priv->ctx_pool;
	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
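
/*
 * rte_security session_create entry point: take a session object from the
 * caller's mempool and configure it for the requested protocol (IPsec or
 * PDCP). MACsec is not supported by this PMD.
 */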
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Unsupported: return the object to the mempool */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
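
/*
 * Create the per-device context mempool on first configure; subsequent
 * configure calls reuse the existing pool.
 */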
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
{
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_NUM_BUFS,
					CTX_POOL_BUF_SIZE,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					SOCKET_ID_ANY, 0);
		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d",
				dev->data->dev_id);

	return 0;
}
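
/* No device-level start/stop processing is required for this PMD. */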
static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

	return 0;
}
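
/* Report device capabilities and limits to the cryptodev framework. */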
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
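
/* Cryptodev operations exposed by this PMD */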
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
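
/* Release the security context, the context mempool and the private data. */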
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
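
/*
 * One-time device initialization: register the burst functions and feature
 * flags, set up the security context, and create the QMan frame queues used
 * towards SEC (one tx queue per queue pair, rx queues per core per session).
 */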
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
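
/*
 * DPAA bus probe: allocate the cryptodev, determine the SEC era from the
 * device tree if it is not already configured, and run device init.
 */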
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
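
/* DPAA bus remove: undo probe-time initialization and free the cryptodev. */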
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
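
/* Register this PMD with the DPAA bus and the cryptodev framework. */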
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}