/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
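
/* Propagate the SEC frame-descriptor status into the crypto op status. */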
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}
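
/* Allocate an op context (job descriptor plus SG table) from the ctx_pool
 * of the queue pair bound to the current lcore's session slot.
 */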
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
	 * for each packet, memset() would be costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
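
/* Address translation helpers: dpaa_mem_vtop() also primes the dpaax IOVA
 * table, so later paddr-to-vaddr lookups in dpaa_mem_ptov() can take the
 * fast table path before falling back to rte_mem_iova2virt().
 */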
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with dest chan as the CAAM chan so that
 * all the packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
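
/* prepare pdcp proto command block of the session */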
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1<<1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers and set the QM_VDQCR_EXACT flag.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag, and the dequeue
	 * can provide two more buffers than requested, so we request two
	 * less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to the caller and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
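
/* Build an auth-only compound frame for a scattered (multi-segment) mbuf:
 * sg[0] carries the digest output, sg[1] is an extension pointing at the
 * input table (optional IV, the data segments and, for decode, the expected
 * digest saved in ctx->digest so that SEC can verify it in hardware).
 */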
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 * |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
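
/* Build a cipher-only compound frame over scattered mbufs: the output table
 * starts at sg[2], and the input table (IV followed by the source segments)
 * is chained after it.
 */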
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
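
/* Build an AEAD (GCM) compound frame over scattered mbufs; the AAD, when
 * present, is carried as a separate input SG entry right after the IV.
 */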
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
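
/* Contiguous-mbuf variant of the GCM job: the input table is IV + optional
 * AAD + data (+ expected digest for decode), the output table is data
 * (+ generated digest for encode).
 */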
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
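
/* Build a chained cipher+auth compound frame over scattered mbufs. */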
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
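
/* Contiguous-mbuf variant of the chained cipher+auth job. */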
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
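
/* Lookaside protocol (IPsec/PDCP) jobs: the whole packet is handed to SEC
 * as one input frame; protocol-level processing is done by the shared
 * descriptor prepared for the session.
 */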
#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
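
/* Scatter-gather variant of build_proto() for multi-segment mbufs. */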
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
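
/* Enqueue burst: translate each crypto op into a compound frame descriptor
 * and push up to DPAA_SEC_BURST frames at a time onto the session's
 * per-lcore input frame queue.
 */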
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses)) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
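
/* Dequeue burst: drain completed jobs from the qp's output frame queue. */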
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		rte_free(session->cipher_key.data);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		rte_free(session->auth_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		goto error_out;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -EINVAL;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		rte_free(session->aead_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_WARN("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
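
/* Bind a session to a queue pair: prepare the shared descriptor (CDB) and
 * schedule the session's per-lcore input FQ towards the CAAM channel.
 */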
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,	void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			struct rte_security_ipsec_xform *ipsec_xform,
			dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
				"+++Using sha256-hmac truncated len is non-standard, "
				"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
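/*
 * Illustrative only: a chained cipher+auth xform pair for an
 * AES-CBC/HMAC-SHA1-96 ESP SA, the usual input to the lookup tables
 * above. `cipher_key`, `auth_key` and `IV_OFFSET` are placeholders.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.key = { .data = auth_key, .length = 20 },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */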
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
	} else {
		goto out;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
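/*
 * Illustrative only: a minimal lookaside-protocol IPsec session config as
 * this handler expects it, reusing the AEAD xform sketched earlier. SPI,
 * salt and tunnel endpoint values are placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.salt = 0x12345678,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = {
 *				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *				.ipv4 = { .ttl = 64 },
 *			},
 *		},
 *		.crypto_xform = &aead,
 *	};
 */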
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* Guard against a NULL cipher xform (auth-only session) */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
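/*
 * Illustrative only: a PDCP data-plane (uplink) session config with a
 * SNOW 3G cipher xform; `snow_cipher` and the bearer value are
 * placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.bearer = 0x3,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffffff,
 *		},
 *		.crypto_xform = &snow_cipher,
 *	};
 */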
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
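/*
 * Illustrative only: creating and destroying a security session against
 * this PMD from application code; `dev_id`, `conf` and `sess_mp` are
 * placeholders.
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 *	...
 *	rte_security_session_destroy(sec_ctx, sec_sess);
 */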
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
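/*
 * Illustrative only: these fields surface to applications through
 * rte_cryptodev_info_get(), e.g.:
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	uint16_t nb_qps = info.max_nb_queue_pairs;
 */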
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
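/*
 * Note the contrast with the parallel handler above: parallel dequeues
 * return qman_cb_dqrr_consume, releasing the DQRR entry immediately,
 * while atomic dequeues return qman_cb_dqrr_defer and park the entry in
 * the per-lcore DQRR shadow (SIZE/HELD/MBUF) until the application is
 * done with the flow, which is what preserves per-flow atomic ordering.
 */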
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
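/*
 * Illustrative only: this attach hook is normally reached through the
 * event crypto adapter; `adapter_id`, `cdev_id` and `qp_id` are
 * placeholders.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						qp_id, &ev);
 */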
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
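/*
 * The "pmd.crypto.dpaa" logtype registered above defaults to NOTICE;
 * more verbose driver logging can be requested with the standard EAL
 * option, e.g. --log-level=pmd.crypto.dpaa:debug
 */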