1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIBRTE_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
34 /* RTA header files */
35 #include <desc/common.h>
36 #include <desc/algo.h>
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
40 #include <rte_dpaa_bus.h>
42 #include <dpaa_sec_event.h>
43 #include <dpaa_sec_log.h>
44 #include <dpaax_iova_table.h>
46 enum rta_sec_era rta_sec_era;
50 static uint8_t cryptodev_driver_id;
52 static __thread struct rte_crypto_op **dpaa_sec_ops;
53 static __thread int dpaa_sec_op_nb;
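/*
 * Editorial note: these per-lcore variables stage up to DPAA_SEC_BURST
 * completed crypto ops collected by the DQRR callback below until the
 * dequeue path hands them back to the application.
 */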
56 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
59 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
61 if (!ctx->fd_status) {
62 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
63 } else {
64 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
65 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
66 }
69 static inline struct dpaa_sec_op_ctx *
70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
72 struct dpaa_sec_op_ctx *ctx;
75 retval = rte_mempool_get(
76 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
79 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
83 * Clear SG memory. There are 16 SG entries of 16 bytes each.
84 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
85 * to clear all the SG entries. As dpaa_sec_alloc_ctx() is called for
86 * each packet, memset() would be costlier than dcbz_64().
87 */
88 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
89 dcbz_64(&ctx->job.sg[i]);
91 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
92 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
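
/*
 * Illustrative sketch (editorial, not driver code): because the whole op
 * ctx is a single mempool object, the vtop_offset recorded above lets any
 * address inside the ctx be converted to an IOVA with one subtraction,
 * avoiding a table lookup in the hot path.
 */
static inline rte_iova_t
example_ctx_field_iova(struct dpaa_sec_op_ctx *ctx, void *field)
{
	/* valid only for pointers that fall inside *ctx */
	return (rte_iova_t)((size_t)field - ctx->vtop_offset);
}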
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
100 const struct rte_memseg *ms;
102 ms = rte_mem_virt2memseg(vaddr, NULL);
104 dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
105 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
111 dpaa_mem_ptov(rte_iova_t paddr)
115 va = (void *)dpaax_iova_table_get_va(paddr);
119 return rte_mem_iova2virt(paddr);
123 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
125 const struct qm_mr_entry *msg)
127 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
128 fq->fqid, msg->ern.rc, msg->ern.seqnum);
131 /* initialize the queue with dest chan as caam chan so that
132 * all the packets in this queue could be dispatched into caam
133 */
134 static int
135 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
138 struct qm_mcc_initfq fq_opts;
142 /* Clear FQ options */
143 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
145 flags = QMAN_INITFQ_FLAG_SCHED;
146 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
147 QM_INITFQ_WE_CONTEXTB;
149 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
150 fq_opts.fqd.context_b = fqid_out;
151 fq_opts.fqd.dest.channel = qm_channel_caam;
152 fq_opts.fqd.dest.wq = 0;
154 fq_in->cb.ern = ern_sec_fq_handler;
156 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
158 ret = qman_init_fq(fq_in, flags, &fq_opts);
159 if (unlikely(ret != 0))
160 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
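
/*
 * Usage sketch (editorial; mirrors dpaa_sec_attach_sess_q() further
 * below): a session's input FQ is bound to the IOVA of its shared
 * descriptor (CDB) and to the queue pair's output FQ.
 */
static inline int
example_bind_session_fq(dpaa_sec_session *ses, struct dpaa_sec_qp *qp)
{
	return dpaa_sec_init_rx(ses->inq[rte_lcore_id() % MAX_DPAA_CORES],
				dpaa_mem_vtop(&ses->cdb),
				qman_fq_fqid(&qp->outq));
}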
165 /* Frames are enqueued on in_fq and CAAM puts the crypto results into out_fq */
166 static enum qman_cb_dqrr_result
167 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
168 struct qman_fq *fq __always_unused,
169 const struct qm_dqrr_entry *dqrr)
171 const struct qm_fd *fd;
172 struct dpaa_sec_job *job;
173 struct dpaa_sec_op_ctx *ctx;
175 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
176 return qman_cb_dqrr_defer;
178 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
179 return qman_cb_dqrr_consume;
182 /* sg is embedded in an op ctx,
183 * sg[0] is for output
184 * sg[1] for input
185 */
186 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
188 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
189 ctx->fd_status = fd->status;
190 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
191 struct qm_sg_entry *sg_out;
193 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
194 ctx->op->sym->m_src : ctx->op->sym->m_dst;
196 sg_out = &job->sg[0];
197 hw_sg_to_cpu(sg_out);
198 len = sg_out->length;
200 while (mbuf->next != NULL) {
201 len -= mbuf->data_len;
202 mbuf = mbuf->next;
203 }
204 mbuf->data_len = len;
206 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
207 dpaa_sec_op_ending(ctx);
209 return qman_cb_dqrr_consume;
212 /* CAAM results are put into this queue */
214 dpaa_sec_init_tx(struct qman_fq *fq)
217 struct qm_mcc_initfq opts;
220 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
221 QMAN_FQ_FLAG_DYNAMIC_FQID;
223 ret = qman_create_fq(0, flags, fq);
225 DPAA_SEC_ERR("qman_create_fq failed");
229 memset(&opts, 0, sizeof(opts));
230 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
231 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
233 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
235 fq->cb.dqrr = dqrr_out_fq_cb_rx;
236 fq->cb.ern = ern_sec_fq_handler;
238 ret = qman_init_fq(fq, 0, &opts);
240 DPAA_SEC_ERR("unable to init caam source fq!");
247 static inline int is_encode(dpaa_sec_session *ses)
249 return ses->dir == DIR_ENC;
252 static inline int is_decode(dpaa_sec_session *ses)
254 return ses->dir == DIR_DEC;
257 #ifdef RTE_LIBRTE_SECURITY
259 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
261 struct alginfo authdata = {0}, cipherdata = {0};
262 struct sec_cdb *cdb = &ses->cdb;
263 struct alginfo *p_authdata = NULL;
264 int32_t shared_desc_len = 0;
266 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
272 cipherdata.key = (size_t)ses->cipher_key.data;
273 cipherdata.keylen = ses->cipher_key.length;
274 cipherdata.key_enc_flags = 0;
275 cipherdata.key_type = RTA_DATA_IMM;
276 cipherdata.algtype = ses->cipher_key.alg;
277 cipherdata.algmode = ses->cipher_key.algmode;
279 cdb->sh_desc[0] = cipherdata.keylen;
284 authdata.key = (size_t)ses->auth_key.data;
285 authdata.keylen = ses->auth_key.length;
286 authdata.key_enc_flags = 0;
287 authdata.key_type = RTA_DATA_IMM;
288 authdata.algtype = ses->auth_key.alg;
289 authdata.algmode = ses->auth_key.algmode;
291 p_authdata = &authdata;
293 cdb->sh_desc[1] = authdata.keylen;
296 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
298 (unsigned int *)cdb->sh_desc,
299 &cdb->sh_desc[2], 2);
301 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
305 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
307 (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
308 cipherdata.key_type = RTA_DATA_PTR;
310 if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
312 (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
313 authdata.key_type = RTA_DATA_PTR;
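
/*
 * Sketch of the decision above (editorial): rta_inline_query() leaves a
 * bitmask in sh_desc[2]; bit 0 refers to the key length stored in
 * sh_desc[0] (cipher) and bit 1 to the one in sh_desc[1] (auth). A set
 * bit means the key may stay immediate (RTA_DATA_IMM); a clear bit means
 * it must be passed by pointer (RTA_DATA_PTR), as done above.
 */
static inline int
example_key_fits_inline(uint32_t inl_mask, unsigned int key_idx)
{
	return (inl_mask & (1u << key_idx)) != 0;
}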
320 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
321 if (ses->dir == DIR_ENC)
322 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
323 cdb->sh_desc, 1, swap,
328 ses->pdcp.hfn_threshold,
329 &cipherdata, &authdata,
331 else if (ses->dir == DIR_DEC)
332 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
333 cdb->sh_desc, 1, swap,
338 ses->pdcp.hfn_threshold,
339 &cipherdata, &authdata,
342 if (ses->dir == DIR_ENC)
343 shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
344 cdb->sh_desc, 1, swap,
349 ses->pdcp.hfn_threshold,
350 &cipherdata, p_authdata, 0);
351 else if (ses->dir == DIR_DEC)
352 shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
353 cdb->sh_desc, 1, swap,
358 ses->pdcp.hfn_threshold,
359 &cipherdata, p_authdata, 0);
361 return shared_desc_len;
364 /* prepare ipsec proto command block of the session */
366 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
368 struct alginfo cipherdata = {0}, authdata = {0};
369 struct sec_cdb *cdb = &ses->cdb;
370 int32_t shared_desc_len = 0;
372 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378 cipherdata.key = (size_t)ses->cipher_key.data;
379 cipherdata.keylen = ses->cipher_key.length;
380 cipherdata.key_enc_flags = 0;
381 cipherdata.key_type = RTA_DATA_IMM;
382 cipherdata.algtype = ses->cipher_key.alg;
383 cipherdata.algmode = ses->cipher_key.algmode;
385 if (ses->auth_key.length) {
386 authdata.key = (size_t)ses->auth_key.data;
387 authdata.keylen = ses->auth_key.length;
388 authdata.key_enc_flags = 0;
389 authdata.key_type = RTA_DATA_IMM;
390 authdata.algtype = ses->auth_key.alg;
391 authdata.algmode = ses->auth_key.algmode;
394 cdb->sh_desc[0] = cipherdata.keylen;
395 cdb->sh_desc[1] = authdata.keylen;
396 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
398 (unsigned int *)cdb->sh_desc,
399 &cdb->sh_desc[2], 2);
402 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
405 if (cdb->sh_desc[2] & 1)
406 cipherdata.key_type = RTA_DATA_IMM;
408 cipherdata.key = (size_t)dpaa_mem_vtop(
409 (void *)(size_t)cipherdata.key);
410 cipherdata.key_type = RTA_DATA_PTR;
412 if (cdb->sh_desc[2] & (1<<1))
413 authdata.key_type = RTA_DATA_IMM;
415 authdata.key = (size_t)dpaa_mem_vtop(
416 (void *)(size_t)authdata.key);
417 authdata.key_type = RTA_DATA_PTR;
423 if (ses->dir == DIR_ENC) {
424 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
426 true, swap, SHR_SERIAL,
428 (uint8_t *)&ses->ip4_hdr,
429 &cipherdata, &authdata);
430 } else if (ses->dir == DIR_DEC) {
431 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
433 true, swap, SHR_SERIAL,
435 &cipherdata, &authdata);
437 return shared_desc_len;
440 /* prepare command block of the session */
442 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
444 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
445 int32_t shared_desc_len = 0;
446 struct sec_cdb *cdb = &ses->cdb;
448 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
454 memset(cdb, 0, sizeof(struct sec_cdb));
457 #ifdef RTE_LIBRTE_SECURITY
459 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
462 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
465 case DPAA_SEC_CIPHER:
466 alginfo_c.key = (size_t)ses->cipher_key.data;
467 alginfo_c.keylen = ses->cipher_key.length;
468 alginfo_c.key_enc_flags = 0;
469 alginfo_c.key_type = RTA_DATA_IMM;
470 alginfo_c.algtype = ses->cipher_key.alg;
471 alginfo_c.algmode = ses->cipher_key.algmode;
473 switch (ses->cipher_alg) {
474 case RTE_CRYPTO_CIPHER_AES_CBC:
475 case RTE_CRYPTO_CIPHER_3DES_CBC:
476 case RTE_CRYPTO_CIPHER_AES_CTR:
477 case RTE_CRYPTO_CIPHER_3DES_CTR:
478 shared_desc_len = cnstr_shdsc_blkcipher(
480 swap, SHR_NEVER, &alginfo_c,
484 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
485 shared_desc_len = cnstr_shdsc_snow_f8(
486 cdb->sh_desc, true, swap,
490 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
491 shared_desc_len = cnstr_shdsc_zuce(
492 cdb->sh_desc, true, swap,
497 DPAA_SEC_ERR("unsupported cipher alg %d",
503 alginfo_a.key = (size_t)ses->auth_key.data;
504 alginfo_a.keylen = ses->auth_key.length;
505 alginfo_a.key_enc_flags = 0;
506 alginfo_a.key_type = RTA_DATA_IMM;
507 alginfo_a.algtype = ses->auth_key.alg;
508 alginfo_a.algmode = ses->auth_key.algmode;
509 switch (ses->auth_alg) {
510 case RTE_CRYPTO_AUTH_MD5_HMAC:
511 case RTE_CRYPTO_AUTH_SHA1_HMAC:
512 case RTE_CRYPTO_AUTH_SHA224_HMAC:
513 case RTE_CRYPTO_AUTH_SHA256_HMAC:
514 case RTE_CRYPTO_AUTH_SHA384_HMAC:
515 case RTE_CRYPTO_AUTH_SHA512_HMAC:
516 shared_desc_len = cnstr_shdsc_hmac(
518 swap, SHR_NEVER, &alginfo_a,
522 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
523 shared_desc_len = cnstr_shdsc_snow_f9(
524 cdb->sh_desc, true, swap,
529 case RTE_CRYPTO_AUTH_ZUC_EIA3:
530 shared_desc_len = cnstr_shdsc_zuca(
531 cdb->sh_desc, true, swap,
537 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
541 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
542 DPAA_SEC_ERR("unsupported aead alg");
545 alginfo.key = (size_t)ses->aead_key.data;
546 alginfo.keylen = ses->aead_key.length;
547 alginfo.key_enc_flags = 0;
548 alginfo.key_type = RTA_DATA_IMM;
549 alginfo.algtype = ses->aead_key.alg;
550 alginfo.algmode = ses->aead_key.algmode;
552 if (ses->dir == DIR_ENC)
553 shared_desc_len = cnstr_shdsc_gcm_encap(
554 cdb->sh_desc, true, swap, SHR_NEVER,
559 shared_desc_len = cnstr_shdsc_gcm_decap(
560 cdb->sh_desc, true, swap, SHR_NEVER,
565 case DPAA_SEC_CIPHER_HASH:
566 alginfo_c.key = (size_t)ses->cipher_key.data;
567 alginfo_c.keylen = ses->cipher_key.length;
568 alginfo_c.key_enc_flags = 0;
569 alginfo_c.key_type = RTA_DATA_IMM;
570 alginfo_c.algtype = ses->cipher_key.alg;
571 alginfo_c.algmode = ses->cipher_key.algmode;
573 alginfo_a.key = (size_t)ses->auth_key.data;
574 alginfo_a.keylen = ses->auth_key.length;
575 alginfo_a.key_enc_flags = 0;
576 alginfo_a.key_type = RTA_DATA_IMM;
577 alginfo_a.algtype = ses->auth_key.alg;
578 alginfo_a.algmode = ses->auth_key.algmode;
580 cdb->sh_desc[0] = alginfo_c.keylen;
581 cdb->sh_desc[1] = alginfo_a.keylen;
582 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
584 (unsigned int *)cdb->sh_desc,
585 &cdb->sh_desc[2], 2);
588 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
591 if (cdb->sh_desc[2] & 1)
592 alginfo_c.key_type = RTA_DATA_IMM;
594 alginfo_c.key = (size_t)dpaa_mem_vtop(
595 (void *)(size_t)alginfo_c.key);
596 alginfo_c.key_type = RTA_DATA_PTR;
598 if (cdb->sh_desc[2] & (1<<1))
599 alginfo_a.key_type = RTA_DATA_IMM;
601 alginfo_a.key = (size_t)dpaa_mem_vtop(
602 (void *)(size_t)alginfo_a.key);
603 alginfo_a.key_type = RTA_DATA_PTR;
608 /* Auth_only_len is set as 0 here and it will be
609 * overwritten in fd for each packet.
610 */
611 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
612 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
614 ses->digest_length, ses->dir);
616 case DPAA_SEC_HASH_CIPHER:
618 DPAA_SEC_ERR("error: Unsupported session");
622 if (shared_desc_len < 0) {
623 DPAA_SEC_ERR("error in preparing command block");
624 return shared_desc_len;
627 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
628 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
629 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
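/*
 * Editorial note: the CDB header words are converted to big-endian here
 * because the SEC hardware parses the shared descriptor header in BE
 * format regardless of the core's endianness (hence the swap flag used
 * when constructing the descriptors above).
 */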
634 /* qp is lockless; it should be accessed by only one thread */
636 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
639 unsigned int pkts = 0;
640 int num_rx_bufs, ret;
641 struct qm_dqrr_entry *dq;
642 uint32_t vdqcr_flags = 0;
646 * For requests of up to four buffers, we provide the exact number of
647 * buffers. Otherwise we do not set the QM_VDQCR_EXACT flag: without
648 * that flag the hardware can return up to two more buffers than
649 * requested, so we request two less in this case.
650 */
651 if (nb_ops < 4) {
652 vdqcr_flags = QM_VDQCR_EXACT;
653 num_rx_bufs = nb_ops;
654 } else {
655 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
656 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
657 }
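/*
 * Worked example (editorial): nb_ops = 3 pulls exactly 3 frames with
 * QM_VDQCR_EXACT; nb_ops = 32 requests 30 frames without the flag, so
 * the portal may deliver up to 32.
 */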
658 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
663 const struct qm_fd *fd;
664 struct dpaa_sec_job *job;
665 struct dpaa_sec_op_ctx *ctx;
666 struct rte_crypto_op *op;
668 dq = qman_dequeue(fq);
673 /* sg is embedded in an op ctx,
674 * sg[0] is for output
675 * sg[1] for input
676 */
677 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
679 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
680 ctx->fd_status = fd->status;
682 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
683 struct qm_sg_entry *sg_out;
685 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
686 op->sym->m_src : op->sym->m_dst;
688 sg_out = &job->sg[0];
689 hw_sg_to_cpu(sg_out);
690 len = sg_out->length;
692 while (mbuf->next != NULL) {
693 len -= mbuf->data_len;
694 mbuf = mbuf->next;
695 }
696 mbuf->data_len = len;
698 if (!ctx->fd_status) {
699 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
700 } else {
701 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
702 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
703 }
706 /* report op status to sym->op and then free the ctx memory */
707 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
709 qman_dqrr_consume(fq, dq);
710 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
715 static inline struct dpaa_sec_job *
716 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
718 struct rte_crypto_sym_op *sym = op->sym;
719 struct rte_mbuf *mbuf = sym->m_src;
720 struct dpaa_sec_job *cf;
721 struct dpaa_sec_op_ctx *ctx;
722 struct qm_sg_entry *sg, *out_sg, *in_sg;
723 phys_addr_t start_addr;
724 uint8_t *old_digest, extra_segs;
725 int data_len, data_offset;
727 data_len = sym->auth.data.length;
728 data_offset = sym->auth.data.offset;
730 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
731 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
732 if ((data_len & 7) || (data_offset & 7)) {
733 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
737 data_len = data_len >> 3;
738 data_offset = data_offset >> 3;
746 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
747 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
751 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
757 old_digest = ctx->digest;
761 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
762 out_sg->length = ses->digest_length;
763 cpu_to_hw_sg(out_sg);
767 /* need to extend the input to a compound frame */
768 in_sg->extension = 1;
770 in_sg->length = data_len;
771 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
776 if (ses->iv.length) {
779 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
782 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
783 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
785 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
786 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
789 sg->length = ses->iv.length;
791 qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
792 in_sg->length += sg->length;
797 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
798 sg->offset = data_offset;
800 if (data_len <= (mbuf->data_len - data_offset)) {
801 sg->length = data_len;
803 sg->length = mbuf->data_len - data_offset;
805 /* remaining i/p segs */
806 while ((data_len = data_len - sg->length) &&
807 (mbuf = mbuf->next)) {
810 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
811 if (data_len > mbuf->data_len)
812 sg->length = mbuf->data_len;
814 sg->length = data_len;
818 if (is_decode(ses)) {
819 /* Digest verification case */
822 rte_memcpy(old_digest, sym->auth.digest.data,
824 start_addr = dpaa_mem_vtop(old_digest);
825 qm_sg_entry_set64(sg, start_addr);
826 sg->length = ses->digest_length;
827 in_sg->length += ses->digest_length;
838 * |<----data_len------->|
839 * |ip_header|ah_header|icv|payload|
844 static inline struct dpaa_sec_job *
845 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
847 struct rte_crypto_sym_op *sym = op->sym;
848 struct rte_mbuf *mbuf = sym->m_src;
849 struct dpaa_sec_job *cf;
850 struct dpaa_sec_op_ctx *ctx;
851 struct qm_sg_entry *sg, *in_sg;
852 rte_iova_t start_addr;
854 int data_len, data_offset;
856 data_len = sym->auth.data.length;
857 data_offset = sym->auth.data.offset;
859 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
860 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
861 if ((data_len & 7) || (data_offset & 7)) {
862 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
866 data_len = data_len >> 3;
867 data_offset = data_offset >> 3;
870 ctx = dpaa_sec_alloc_ctx(ses, 4);
876 old_digest = ctx->digest;
878 start_addr = rte_pktmbuf_iova(mbuf);
881 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
882 sg->length = ses->digest_length;
887 /* need to extend the input to a compound frame */
888 in_sg->extension = 1;
890 in_sg->length = data_len;
891 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
894 if (ses->iv.length) {
897 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
900 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
901 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
903 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
904 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
907 sg->length = ses->iv.length;
909 qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
910 in_sg->length += sg->length;
915 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
916 sg->offset = data_offset;
917 sg->length = data_len;
919 if (is_decode(ses)) {
920 /* Digest verification case */
922 /* hash result or digest, save digest first */
923 rte_memcpy(old_digest, sym->auth.digest.data,
925 /* let's check digest by hw */
926 start_addr = dpaa_mem_vtop(old_digest);
928 qm_sg_entry_set64(sg, start_addr);
929 sg->length = ses->digest_length;
930 in_sg->length += ses->digest_length;
939 static inline struct dpaa_sec_job *
940 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
942 struct rte_crypto_sym_op *sym = op->sym;
943 struct dpaa_sec_job *cf;
944 struct dpaa_sec_op_ctx *ctx;
945 struct qm_sg_entry *sg, *out_sg, *in_sg;
946 struct rte_mbuf *mbuf;
948 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
950 int data_len, data_offset;
952 data_len = sym->cipher.data.length;
953 data_offset = sym->cipher.data.offset;
955 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
956 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
957 if ((data_len & 7) || (data_offset & 7)) {
958 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
962 data_len = data_len >> 3;
963 data_offset = data_offset >> 3;
968 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
971 req_segs = mbuf->nb_segs * 2 + 3;
973 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
974 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
979 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
988 out_sg->extension = 1;
989 out_sg->length = data_len;
990 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
991 cpu_to_hw_sg(out_sg);
995 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
996 sg->length = mbuf->data_len - data_offset;
997 sg->offset = data_offset;
999 /* Successive segs */
1004 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1005 sg->length = mbuf->data_len;
1014 in_sg->extension = 1;
1016 in_sg->length = data_len + ses->iv.length;
1019 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1020 cpu_to_hw_sg(in_sg);
1023 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1024 sg->length = ses->iv.length;
1029 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1030 sg->length = mbuf->data_len - data_offset;
1031 sg->offset = data_offset;
1033 /* Successive segs */
1038 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1039 sg->length = mbuf->data_len;
1048 static inline struct dpaa_sec_job *
1049 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1051 struct rte_crypto_sym_op *sym = op->sym;
1052 struct dpaa_sec_job *cf;
1053 struct dpaa_sec_op_ctx *ctx;
1054 struct qm_sg_entry *sg;
1055 rte_iova_t src_start_addr, dst_start_addr;
1056 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1058 int data_len, data_offset;
1060 data_len = sym->cipher.data.length;
1061 data_offset = sym->cipher.data.offset;
1063 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1064 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1065 if ((data_len & 7) || (data_offset & 7)) {
1066 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1070 data_len = data_len >> 3;
1071 data_offset = data_offset >> 3;
1074 ctx = dpaa_sec_alloc_ctx(ses, 4);
1081 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1084 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1086 dst_start_addr = src_start_addr;
1090 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1091 sg->length = data_len + ses->iv.length;
1097 /* need to extend the input to a compound frame */
1100 sg->length = data_len + ses->iv.length;
1101 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1105 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1106 sg->length = ses->iv.length;
1110 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1111 sg->length = data_len;
1118 static inline struct dpaa_sec_job *
1119 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1121 struct rte_crypto_sym_op *sym = op->sym;
1122 struct dpaa_sec_job *cf;
1123 struct dpaa_sec_op_ctx *ctx;
1124 struct qm_sg_entry *sg, *out_sg, *in_sg;
1125 struct rte_mbuf *mbuf;
1127 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1132 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1135 req_segs = mbuf->nb_segs * 2 + 4;
1138 if (ses->auth_only_len)
1141 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1142 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1147 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1154 rte_prefetch0(cf->sg);
1157 out_sg = &cf->sg[0];
1158 out_sg->extension = 1;
1160 out_sg->length = sym->aead.data.length + ses->digest_length;
1162 out_sg->length = sym->aead.data.length;
1164 /* output sg entries */
1166 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1167 cpu_to_hw_sg(out_sg);
1170 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1171 sg->length = mbuf->data_len - sym->aead.data.offset;
1172 sg->offset = sym->aead.data.offset;
1174 /* Successive segs */
1179 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1180 sg->length = mbuf->data_len;
1183 sg->length -= ses->digest_length;
1185 if (is_encode(ses)) {
1187 /* set auth output */
1189 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1190 sg->length = ses->digest_length;
1198 in_sg->extension = 1;
1201 in_sg->length = ses->iv.length + sym->aead.data.length
1202 + ses->auth_only_len;
1204 in_sg->length = ses->iv.length + sym->aead.data.length
1205 + ses->auth_only_len + ses->digest_length;
1207 /* input sg entries */
1209 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1210 cpu_to_hw_sg(in_sg);
1213 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1214 sg->length = ses->iv.length;
1217 /* 2nd seg auth only */
1218 if (ses->auth_only_len) {
1220 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1221 sg->length = ses->auth_only_len;
1227 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1228 sg->length = mbuf->data_len - sym->aead.data.offset;
1229 sg->offset = sym->aead.data.offset;
1231 /* Successive segs */
1236 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1237 sg->length = mbuf->data_len;
1241 if (is_decode(ses)) {
1244 memcpy(ctx->digest, sym->aead.digest.data,
1245 ses->digest_length);
1246 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1247 sg->length = ses->digest_length;
1255 static inline struct dpaa_sec_job *
1256 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1258 struct rte_crypto_sym_op *sym = op->sym;
1259 struct dpaa_sec_job *cf;
1260 struct dpaa_sec_op_ctx *ctx;
1261 struct qm_sg_entry *sg;
1262 uint32_t length = 0;
1263 rte_iova_t src_start_addr, dst_start_addr;
1264 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1267 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1270 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1272 dst_start_addr = src_start_addr;
1274 ctx = dpaa_sec_alloc_ctx(ses, 7);
1282 rte_prefetch0(cf->sg);
1284 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1285 if (is_encode(ses)) {
1286 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1287 sg->length = ses->iv.length;
1288 length += sg->length;
1292 if (ses->auth_only_len) {
1293 qm_sg_entry_set64(sg,
1294 dpaa_mem_vtop(sym->aead.aad.data));
1295 sg->length = ses->auth_only_len;
1296 length += sg->length;
1300 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1301 sg->length = sym->aead.data.length;
1302 length += sg->length;
1306 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1307 sg->length = ses->iv.length;
1308 length += sg->length;
1312 if (ses->auth_only_len) {
1313 qm_sg_entry_set64(sg,
1314 dpaa_mem_vtop(sym->aead.aad.data));
1315 sg->length = ses->auth_only_len;
1316 length += sg->length;
1320 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1321 sg->length = sym->aead.data.length;
1322 length += sg->length;
1325 memcpy(ctx->digest, sym->aead.digest.data,
1326 ses->digest_length);
1329 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1330 sg->length = ses->digest_length;
1331 length += sg->length;
1335 /* input compound frame */
1336 cf->sg[1].length = length;
1337 cf->sg[1].extension = 1;
1338 cf->sg[1].final = 1;
1339 cpu_to_hw_sg(&cf->sg[1]);
1343 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1344 qm_sg_entry_set64(sg,
1345 dst_start_addr + sym->aead.data.offset);
1346 sg->length = sym->aead.data.length;
1347 length = sg->length;
1348 if (is_encode(ses)) {
1350 /* set auth output */
1352 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1353 sg->length = ses->digest_length;
1354 length += sg->length;
1359 /* output compound frame */
1360 cf->sg[0].length = length;
1361 cf->sg[0].extension = 1;
1362 cpu_to_hw_sg(&cf->sg[0]);
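
/*
 * Editorial sketch of the compound frame built above (encrypt case):
 *
 *   cf->sg[0] (output, extension) -> { ciphertext [, digest] }
 *   cf->sg[1] (input, extension)  -> { IV [, AAD], plaintext }
 *
 * Both entries reference SG tables living inside the same per-op ctx.
 */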
1367 static inline struct dpaa_sec_job *
1368 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1370 struct rte_crypto_sym_op *sym = op->sym;
1371 struct dpaa_sec_job *cf;
1372 struct dpaa_sec_op_ctx *ctx;
1373 struct qm_sg_entry *sg, *out_sg, *in_sg;
1374 struct rte_mbuf *mbuf;
1376 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1381 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1384 req_segs = mbuf->nb_segs * 2 + 4;
1387 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1388 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1393 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1400 rte_prefetch0(cf->sg);
1403 out_sg = &cf->sg[0];
1404 out_sg->extension = 1;
1406 out_sg->length = sym->auth.data.length + ses->digest_length;
1408 out_sg->length = sym->auth.data.length;
1410 /* output sg entries */
1412 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1413 cpu_to_hw_sg(out_sg);
1416 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1417 sg->length = mbuf->data_len - sym->auth.data.offset;
1418 sg->offset = sym->auth.data.offset;
1420 /* Successive segs */
1425 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1426 sg->length = mbuf->data_len;
1429 sg->length -= ses->digest_length;
1431 if (is_encode(ses)) {
1433 /* set auth output */
1435 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1436 sg->length = ses->digest_length;
1444 in_sg->extension = 1;
1447 in_sg->length = ses->iv.length + sym->auth.data.length;
1449 in_sg->length = ses->iv.length + sym->auth.data.length
1450 + ses->digest_length;
1452 /* input sg entries */
1454 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1455 cpu_to_hw_sg(in_sg);
1458 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1459 sg->length = ses->iv.length;
1464 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1465 sg->length = mbuf->data_len - sym->auth.data.offset;
1466 sg->offset = sym->auth.data.offset;
1468 /* Successive segs */
1473 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1474 sg->length = mbuf->data_len;
1478 sg->length -= ses->digest_length;
1479 if (is_decode(ses)) {
1482 memcpy(ctx->digest, sym->auth.digest.data,
1483 ses->digest_length);
1484 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1485 sg->length = ses->digest_length;
1493 static inline struct dpaa_sec_job *
1494 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1496 struct rte_crypto_sym_op *sym = op->sym;
1497 struct dpaa_sec_job *cf;
1498 struct dpaa_sec_op_ctx *ctx;
1499 struct qm_sg_entry *sg;
1500 rte_iova_t src_start_addr, dst_start_addr;
1501 uint32_t length = 0;
1502 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1505 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1507 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1509 dst_start_addr = src_start_addr;
1511 ctx = dpaa_sec_alloc_ctx(ses, 7);
1519 rte_prefetch0(cf->sg);
1521 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1522 if (is_encode(ses)) {
1523 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1524 sg->length = ses->iv.length;
1525 length += sg->length;
1529 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1530 sg->length = sym->auth.data.length;
1531 length += sg->length;
1535 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1536 sg->length = ses->iv.length;
1537 length += sg->length;
1542 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1543 sg->length = sym->auth.data.length;
1544 length += sg->length;
1547 memcpy(ctx->digest, sym->auth.digest.data,
1548 ses->digest_length);
1551 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1552 sg->length = ses->digest_length;
1553 length += sg->length;
1557 /* input compound frame */
1558 cf->sg[1].length = length;
1559 cf->sg[1].extension = 1;
1560 cf->sg[1].final = 1;
1561 cpu_to_hw_sg(&cf->sg[1]);
1565 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1566 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1567 sg->length = sym->cipher.data.length;
1568 length = sg->length;
1569 if (is_encode(ses)) {
1571 /* set auth output */
1573 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1574 sg->length = ses->digest_length;
1575 length += sg->length;
1580 /* output compound frame */
1581 cf->sg[0].length = length;
1582 cf->sg[0].extension = 1;
1583 cpu_to_hw_sg(&cf->sg[0]);
1588 #ifdef RTE_LIBRTE_SECURITY
1589 static inline struct dpaa_sec_job *
1590 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1592 struct rte_crypto_sym_op *sym = op->sym;
1593 struct dpaa_sec_job *cf;
1594 struct dpaa_sec_op_ctx *ctx;
1595 struct qm_sg_entry *sg;
1596 phys_addr_t src_start_addr, dst_start_addr;
1598 ctx = dpaa_sec_alloc_ctx(ses, 2);
1604 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1607 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1609 dst_start_addr = src_start_addr;
1613 qm_sg_entry_set64(sg, src_start_addr);
1614 sg->length = sym->m_src->pkt_len;
1618 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1621 qm_sg_entry_set64(sg, dst_start_addr);
1622 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1628 static inline struct dpaa_sec_job *
1629 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1631 struct rte_crypto_sym_op *sym = op->sym;
1632 struct dpaa_sec_job *cf;
1633 struct dpaa_sec_op_ctx *ctx;
1634 struct qm_sg_entry *sg, *out_sg, *in_sg;
1635 struct rte_mbuf *mbuf;
1637 uint32_t in_len = 0, out_len = 0;
1644 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1645 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1646 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1651 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1657 out_sg = &cf->sg[0];
1658 out_sg->extension = 1;
1659 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1663 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1666 /* Successive segs */
1667 while (mbuf->next) {
1668 sg->length = mbuf->data_len;
1669 out_len += sg->length;
1673 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1676 sg->length = mbuf->buf_len - mbuf->data_off;
1677 out_len += sg->length;
1681 out_sg->length = out_len;
1682 cpu_to_hw_sg(out_sg);
1687 in_sg->extension = 1;
1689 in_len = mbuf->data_len;
1692 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1695 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1696 sg->length = mbuf->data_len;
1699 /* Successive segs */
1704 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1705 sg->length = mbuf->data_len;
1707 in_len += sg->length;
1713 in_sg->length = in_len;
1714 cpu_to_hw_sg(in_sg);
1716 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
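/*
 * Editorial note: the L4 packet type is cleared here, presumably because
 * after lookaside IPsec processing the payload no longer matches the
 * original L4 classification of the mbuf.
 */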
1723 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1726 /* Function to transmit the frames to the given device and queue pair */
1728 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1729 uint16_t num_tx = 0;
1730 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1731 uint32_t frames_to_send;
1732 struct rte_crypto_op *op;
1733 struct dpaa_sec_job *cf;
1734 dpaa_sec_session *ses;
1735 uint16_t auth_hdr_len, auth_tail_len;
1736 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1737 struct qman_fq *inq[DPAA_SEC_BURST];
1740 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1741 DPAA_SEC_BURST : nb_ops;
1742 for (loop = 0; loop < frames_to_send; loop++) {
1744 if (op->sym->m_src->seqn != 0) {
1745 index = op->sym->m_src->seqn - 1;
1746 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1747 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1748 flags[loop] = ((index & 0x0f) << 8);
1749 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1750 DPAA_PER_LCORE_DQRR_SIZE--;
1751 DPAA_PER_LCORE_DQRR_HELD &=
1756 switch (op->sess_type) {
1757 case RTE_CRYPTO_OP_WITH_SESSION:
1758 ses = (dpaa_sec_session *)
1759 get_sym_session_private_data(
1761 cryptodev_driver_id);
1763 #ifdef RTE_LIBRTE_SECURITY
1764 case RTE_CRYPTO_OP_SECURITY_SESSION:
1765 ses = (dpaa_sec_session *)
1766 get_sec_session_private_data(
1767 op->sym->sec_session);
1772 "sessionless crypto op not supported");
1773 frames_to_send = loop;
1779 DPAA_SEC_DP_ERR("session not available");
1780 frames_to_send = loop;
1785 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1786 if (dpaa_sec_attach_sess_q(qp, ses)) {
1787 frames_to_send = loop;
1791 } else if (unlikely(ses->qp[rte_lcore_id() %
1792 MAX_DPAA_CORES] != qp)) {
1793 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1795 ses->qp[rte_lcore_id() %
1796 MAX_DPAA_CORES], qp);
1797 frames_to_send = loop;
1802 auth_hdr_len = op->sym->auth.data.length -
1803 op->sym->cipher.data.length;
1806 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1807 ((op->sym->m_dst == NULL) ||
1808 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1809 switch (ses->ctxt) {
1810 #ifdef RTE_LIBRTE_SECURITY
1812 case DPAA_SEC_IPSEC:
1813 cf = build_proto(op, ses);
1817 cf = build_auth_only(op, ses);
1819 case DPAA_SEC_CIPHER:
1820 cf = build_cipher_only(op, ses);
1823 cf = build_cipher_auth_gcm(op, ses);
1824 auth_hdr_len = ses->auth_only_len;
1826 case DPAA_SEC_CIPHER_HASH:
1828 op->sym->cipher.data.offset
1829 - op->sym->auth.data.offset;
1831 op->sym->auth.data.length
1832 - op->sym->cipher.data.length
1834 cf = build_cipher_auth(op, ses);
1837 DPAA_SEC_DP_ERR("unsupported op");
1838 frames_to_send = loop;
1843 switch (ses->ctxt) {
1844 #ifdef RTE_LIBRTE_SECURITY
1846 case DPAA_SEC_IPSEC:
1847 cf = build_proto_sg(op, ses);
1851 cf = build_auth_only_sg(op, ses);
1853 case DPAA_SEC_CIPHER:
1854 cf = build_cipher_only_sg(op, ses);
1857 cf = build_cipher_auth_gcm_sg(op, ses);
1858 auth_hdr_len = ses->auth_only_len;
1860 case DPAA_SEC_CIPHER_HASH:
1862 op->sym->cipher.data.offset
1863 - op->sym->auth.data.offset;
1865 op->sym->auth.data.length
1866 - op->sym->cipher.data.length
1868 cf = build_cipher_auth_sg(op, ses);
1871 DPAA_SEC_DP_ERR("unsupported op");
1872 frames_to_send = loop;
1877 if (unlikely(!cf)) {
1878 frames_to_send = loop;
1884 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1885 fd->opaque_addr = 0;
1887 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1888 fd->_format1 = qm_fd_compound;
1889 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1891 /* Auth_only_len is set as 0 in descriptor and it is
1892 * overwritten here in the fd.cmd which will update
1893 * the DPOVRD reg.
1894 */
1895 if (auth_hdr_len || auth_tail_len) {
1896 fd->cmd = 0x80000000;
1898 ((auth_tail_len << 16) | auth_hdr_len);
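/*
 * Worked example (editorial): with an 8-byte authenticated-only header
 * and no authenticated tail, fd->cmd = 0x80000000 | (0 << 16) | 8
 * = 0x80000008, which the SEC block loads into DPOVRD for this packet.
 */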
1901 #ifdef RTE_LIBRTE_SECURITY
1902 /* In case of PDCP, per packet HFN is stored in
1903 * mbuf priv after sym_op.
1904 */
1905 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1906 fd->cmd = 0x80000000 |
1907 *((uint32_t *)((uint8_t *)op +
1908 ses->pdcp.hfn_ovd_offset));
1909 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1910 *((uint32_t *)((uint8_t *)op +
1911 ses->pdcp.hfn_ovd_offset)),
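
/*
 * Illustrative application-side sketch (editorial; assumes the session
 * was created with hfn_ovd set): the per-packet HFN consumed above is
 * staged in the op's private area at hfn_ovd_offset before enqueue.
 */
static inline void
example_set_pdcp_hfn(struct rte_crypto_op *op, uint32_t hfn_ovd_offset,
		     uint32_t hfn)
{
	*(uint32_t *)((uint8_t *)op + hfn_ovd_offset) = hfn;
}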
1918 while (loop < frames_to_send) {
1919 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1920 &flags[loop], frames_to_send - loop);
1922 nb_ops -= frames_to_send;
1923 num_tx += frames_to_send;
1926 dpaa_qp->tx_pkts += num_tx;
1927 dpaa_qp->tx_errs += nb_ops - num_tx;
1933 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1937 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1939 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1941 dpaa_qp->rx_pkts += num_rx;
1942 dpaa_qp->rx_errs += nb_ops - num_rx;
1944 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
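
/*
 * Minimal polling-mode usage sketch (editorial; dev_id, queue 0 and the
 * ops array are application-side assumptions, not driver state):
 */
static inline void
example_burst_loop(uint8_t dev_id, struct rte_crypto_op **ops,
		   uint16_t nb_ops)
{
	uint16_t sent = 0, recvd = 0;

	while (sent < nb_ops)
		sent += rte_cryptodev_enqueue_burst(dev_id, 0,
						    &ops[sent],
						    nb_ops - sent);
	while (recvd < nb_ops)
		recvd += rte_cryptodev_dequeue_burst(dev_id, 0,
						     &ops[recvd],
						     nb_ops - recvd);
}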
1949 /** Release queue pair */
1951 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1954 struct dpaa_sec_dev_private *internals;
1955 struct dpaa_sec_qp *qp = NULL;
1957 PMD_INIT_FUNC_TRACE();
1959 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1961 internals = dev->data->dev_private;
1962 if (qp_id >= internals->max_nb_queue_pairs) {
1963 DPAA_SEC_ERR("Max supported qpid %d",
1964 internals->max_nb_queue_pairs);
1968 qp = &internals->qps[qp_id];
1969 rte_mempool_free(qp->ctx_pool);
1970 qp->internals = NULL;
1971 dev->data->queue_pairs[qp_id] = NULL;
1976 /** Setup a queue pair */
1978 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1979 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1980 __rte_unused int socket_id)
1982 struct dpaa_sec_dev_private *internals;
1983 struct dpaa_sec_qp *qp = NULL;
1986 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1988 internals = dev->data->dev_private;
1989 if (qp_id >= internals->max_nb_queue_pairs) {
1990 DPAA_SEC_ERR("Max supported qpid %d",
1991 internals->max_nb_queue_pairs);
1995 qp = &internals->qps[qp_id];
1996 qp->internals = internals;
1997 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1998 dev->data->dev_id, qp_id);
1999 if (!qp->ctx_pool) {
2000 qp->ctx_pool = rte_mempool_create((const char *)str,
2003 CTX_POOL_CACHE_SIZE, 0,
2004 NULL, NULL, NULL, NULL,
2006 if (!qp->ctx_pool) {
2007 DPAA_SEC_ERR("%s create failed\n", str);
2011 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2012 dev->data->dev_id, qp_id);
2013 dev->data->queue_pairs[qp_id] = qp;
2018 /** Return the number of allocated queue pairs */
2020 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
2022 PMD_INIT_FUNC_TRACE();
2024 return dev->data->nb_queue_pairs;
2027 /** Returns the size of the session structure */
2029 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2031 PMD_INIT_FUNC_TRACE();
2033 return sizeof(dpaa_sec_session);
2037 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2038 struct rte_crypto_sym_xform *xform,
2039 dpaa_sec_session *session)
2041 session->ctxt = DPAA_SEC_CIPHER;
2042 session->cipher_alg = xform->cipher.algo;
2043 session->iv.length = xform->cipher.iv.length;
2044 session->iv.offset = xform->cipher.iv.offset;
2045 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2046 RTE_CACHE_LINE_SIZE);
2047 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2048 DPAA_SEC_ERR("No Memory for cipher key");
2051 session->cipher_key.length = xform->cipher.key.length;
2053 memcpy(session->cipher_key.data, xform->cipher.key.data,
2054 xform->cipher.key.length);
2055 switch (xform->cipher.algo) {
2056 case RTE_CRYPTO_CIPHER_AES_CBC:
2057 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2058 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2060 case RTE_CRYPTO_CIPHER_3DES_CBC:
2061 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2062 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2064 case RTE_CRYPTO_CIPHER_AES_CTR:
2065 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2066 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2068 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2069 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2071 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2072 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2075 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2076 xform->cipher.algo);
2077 rte_free(session->cipher_key.data);
2080 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2087 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2088 struct rte_crypto_sym_xform *xform,
2089 dpaa_sec_session *session)
2091 session->ctxt = DPAA_SEC_AUTH;
2092 session->auth_alg = xform->auth.algo;
2093 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2094 RTE_CACHE_LINE_SIZE);
2095 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2096 DPAA_SEC_ERR("No Memory for auth key");
2099 session->auth_key.length = xform->auth.key.length;
2100 session->digest_length = xform->auth.digest_length;
2101 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2102 session->iv.offset = xform->auth.iv.offset;
2103 session->iv.length = xform->auth.iv.length;
2106 memcpy(session->auth_key.data, xform->auth.key.data,
2107 xform->auth.key.length);
2109 switch (xform->auth.algo) {
2110 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2111 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2112 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2114 case RTE_CRYPTO_AUTH_MD5_HMAC:
2115 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2116 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2118 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2119 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2120 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2122 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2123 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2124 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2126 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2127 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2128 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2130 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2131 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2132 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2134 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2135 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2136 session->auth_key.algmode = OP_ALG_AAI_F9;
2138 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2139 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2140 session->auth_key.algmode = OP_ALG_AAI_F9;
2143 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2145 rte_free(session->auth_key.data);
2149 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2156 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2157 struct rte_crypto_sym_xform *xform,
2158 dpaa_sec_session *session)
2161 struct rte_crypto_cipher_xform *cipher_xform;
2162 struct rte_crypto_auth_xform *auth_xform;
2164 session->ctxt = DPAA_SEC_CIPHER_HASH;
2165 if (session->auth_cipher_text) {
2166 cipher_xform = &xform->cipher;
2167 auth_xform = &xform->next->auth;
2169 cipher_xform = &xform->next->cipher;
2170 auth_xform = &xform->auth;
2173 /* Set IV parameters */
2174 session->iv.offset = cipher_xform->iv.offset;
2175 session->iv.length = cipher_xform->iv.length;
2177 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2178 RTE_CACHE_LINE_SIZE);
2179 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2180 DPAA_SEC_ERR("No Memory for cipher key");
2183 session->cipher_key.length = cipher_xform->key.length;
2184 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2185 RTE_CACHE_LINE_SIZE);
2186 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2187 DPAA_SEC_ERR("No Memory for auth key");
2188 rte_free(session->cipher_key.data);
2191 session->auth_key.length = auth_xform->key.length;
2192 memcpy(session->cipher_key.data, cipher_xform->key.data,
2193 cipher_xform->key.length);
2194 memcpy(session->auth_key.data, auth_xform->key.data,
2195 auth_xform->key.length);
2197 session->digest_length = auth_xform->digest_length;
2198 session->auth_alg = auth_xform->algo;
2200 switch (auth_xform->algo) {
2201 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2202 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2203 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2205 case RTE_CRYPTO_AUTH_MD5_HMAC:
2206 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2207 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2209 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2210 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2211 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2213 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2214 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2215 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2217 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2218 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2219 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2221 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2222 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2223 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2226 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2231 session->cipher_alg = cipher_xform->algo;
2233 switch (cipher_xform->algo) {
2234 case RTE_CRYPTO_CIPHER_AES_CBC:
2235 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2236 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2238 case RTE_CRYPTO_CIPHER_3DES_CBC:
2239 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2240 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2242 case RTE_CRYPTO_CIPHER_AES_CTR:
2243 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2244 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2247 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2248 cipher_xform->algo);
2251 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2256 rte_free(session->cipher_key.data);
2257 rte_free(session->auth_key.data);
2262 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2263 struct rte_crypto_sym_xform *xform,
2264 dpaa_sec_session *session)
2266 session->aead_alg = xform->aead.algo;
2267 session->ctxt = DPAA_SEC_AEAD;
2268 session->iv.length = xform->aead.iv.length;
2269 session->iv.offset = xform->aead.iv.offset;
2270 session->auth_only_len = xform->aead.aad_length;
2271 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2272 RTE_CACHE_LINE_SIZE);
2273 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2274 DPAA_SEC_ERR("No Memory for aead key");
2277 session->aead_key.length = xform->aead.key.length;
2278 session->digest_length = xform->aead.digest_length;
2280 memcpy(session->aead_key.data, xform->aead.key.data,
2281 xform->aead.key.length);
2283 switch (session->aead_alg) {
2284 case RTE_CRYPTO_AEAD_AES_GCM:
2285 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2286 session->aead_key.algmode = OP_ALG_AAI_GCM;
2289 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2290 rte_free(session->aead_key.data);
2294 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2300 static struct qman_fq *
2301 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2305 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2306 if (qi->inq_attach[i] == 0) {
2307 qi->inq_attach[i] = 1;
2311 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2317 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2321 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2322 if (&qi->inq[i] == fq) {
2323 if (qman_retire_fq(fq, NULL) != 0)
2324 DPAA_SEC_WARN("Queue is not retired\n");
2326 qi->inq_attach[i] = 0;
2334 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2338 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2339 ret = dpaa_sec_prep_cdb(sess);
2341 DPAA_SEC_ERR("Unable to prepare sec cdb");
2344 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2345 ret = rte_dpaa_portal_init((void *)0);
2347 DPAA_SEC_ERR("Failure in affining portal");
2351 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2352 dpaa_mem_vtop(&sess->cdb),
2353 qman_fq_fqid(&qp->outq));
2355 DPAA_SEC_ERR("Unable to init sec queue");
2361 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2362 struct rte_crypto_sym_xform *xform, void *sess)
2364 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2365 dpaa_sec_session *session = sess;
2369 PMD_INIT_FUNC_TRACE();
2371 if (unlikely(sess == NULL)) {
2372 DPAA_SEC_ERR("invalid session struct");
2375 memset(session, 0, sizeof(dpaa_sec_session));
2377 /* Default IV length = 0 */
2378 session->iv.length = 0;
2381 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2382 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2383 ret = dpaa_sec_cipher_init(dev, xform, session);
2385 /* Authentication Only */
2386 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2387 xform->next == NULL) {
2388 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2389 session->ctxt = DPAA_SEC_AUTH;
2390 ret = dpaa_sec_auth_init(dev, xform, session);
2392 /* Cipher then Authenticate */
2393 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2394 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2395 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2396 session->auth_cipher_text = 1;
2397 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2398 ret = dpaa_sec_auth_init(dev, xform, session);
2399 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2400 ret = dpaa_sec_cipher_init(dev, xform, session);
2402 ret = dpaa_sec_chain_init(dev, xform, session);
2404 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2407 /* Authenticate then Cipher */
2408 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2409 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2410 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2411 session->auth_cipher_text = 0;
2412 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2413 ret = dpaa_sec_cipher_init(dev, xform, session);
2414 else if (xform->next->cipher.algo
2415 == RTE_CRYPTO_CIPHER_NULL)
2416 ret = dpaa_sec_auth_init(dev, xform, session);
2418 ret = dpaa_sec_chain_init(dev, xform, session);
2420 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2424 /* AEAD operation for AES-GCM-type algorithms */
2425 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2426 xform->next == NULL) {
2427 ret = dpaa_sec_aead_init(dev, xform, session);
2430 DPAA_SEC_ERR("Invalid crypto type");
2434 DPAA_SEC_ERR("unable to init session");
2438 rte_spinlock_lock(&internals->lock);
2439 for (i = 0; i < MAX_DPAA_CORES; i++) {
2440 session->inq[i] = dpaa_sec_attach_rxq(internals);
2441 if (session->inq[i] == NULL) {
2442 DPAA_SEC_ERR("unable to attach sec queue");
2443 rte_spinlock_unlock(&internals->lock);
2447 rte_spinlock_unlock(&internals->lock);
2452 rte_free(session->cipher_key.data);
2453 rte_free(session->auth_key.data);
2454 memset(session, 0, sizeof(dpaa_sec_session));
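
/*
 * Illustrative session-setup sketch (editorial; algorithm choices are
 * placeholders): a cipher xform chained to an auth xform exercises the
 * "Cipher then Authenticate" branch handled above.
 */
static inline void
example_fill_chain_xforms(struct rte_crypto_sym_xform *cipher,
			  struct rte_crypto_sym_xform *auth)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;
	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}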
2460 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2461 struct rte_crypto_sym_xform *xform,
2462 struct rte_cryptodev_sym_session *sess,
2463 struct rte_mempool *mempool)
2465 void *sess_private_data;
2468 PMD_INIT_FUNC_TRACE();
2470 if (rte_mempool_get(mempool, &sess_private_data)) {
2471 DPAA_SEC_ERR("Couldn't get object from session mempool");
2475 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2477 DPAA_SEC_ERR("failed to configure session parameters");
2479 /* Return session to mempool */
2480 rte_mempool_put(mempool, sess_private_data);
2484 set_sym_session_private_data(sess, dev->driver_id,
2492 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2494 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2495 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2498 for (i = 0; i < MAX_DPAA_CORES; i++) {
2500 dpaa_sec_detach_rxq(qi, s->inq[i]);
2504 rte_free(s->cipher_key.data);
2505 rte_free(s->auth_key.data);
2506 memset(s, 0, sizeof(dpaa_sec_session));
2507 rte_mempool_put(sess_mp, (void *)s);
2510 /** Clear the memory of session so it doesn't leave key material behind */
2512 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2513 struct rte_cryptodev_sym_session *sess)
2515 PMD_INIT_FUNC_TRACE();
2516 uint8_t index = dev->driver_id;
2517 void *sess_priv = get_sym_session_private_data(sess, index);
2518 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2521 free_session_memory(dev, s);
2522 set_sym_session_private_data(sess, index, NULL);
2526 #ifdef RTE_LIBRTE_SECURITY
2528 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2529 struct rte_security_ipsec_xform *ipsec_xform,
2530 dpaa_sec_session *session)
2532 PMD_INIT_FUNC_TRACE();
2534 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2535 RTE_CACHE_LINE_SIZE);
2536 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2537 DPAA_SEC_ERR("No Memory for aead key");
2540 memcpy(session->aead_key.data, aead_xform->key.data,
2541 aead_xform->key.length);
2543 session->digest_length = aead_xform->digest_length;
2544 session->aead_key.length = aead_xform->key.length;
2546 switch (aead_xform->algo) {
2547 case RTE_CRYPTO_AEAD_AES_GCM:
2548 switch (session->digest_length) {
2550 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2553 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2556 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2559 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2560 session->digest_length);
2563 if (session->dir == DIR_ENC) {
2564 memcpy(session->encap_pdb.gcm.salt,
2565 (uint8_t *)&(ipsec_xform->salt), 4);
2567 memcpy(session->decap_pdb.gcm.salt,
2568 (uint8_t *)&(ipsec_xform->salt), 4);
2570 session->aead_key.algmode = OP_ALG_AAI_GCM;
2571 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2574 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2582 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2583 struct rte_crypto_auth_xform *auth_xform,
2584 struct rte_security_ipsec_xform *ipsec_xform,
2585 dpaa_sec_session *session)
2588 session->cipher_key.data = rte_zmalloc(NULL,
2589 cipher_xform->key.length,
2590 RTE_CACHE_LINE_SIZE);
2591 if (session->cipher_key.data == NULL &&
2592 cipher_xform->key.length > 0) {
2593 DPAA_SEC_ERR("No Memory for cipher key");
2597 session->cipher_key.length = cipher_xform->key.length;
2598 memcpy(session->cipher_key.data, cipher_xform->key.data,
2599 cipher_xform->key.length);
2600 session->cipher_alg = cipher_xform->algo;
2602 session->cipher_key.data = NULL;
2603 session->cipher_key.length = 0;
2604 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2608 session->auth_key.data = rte_zmalloc(NULL,
2609 auth_xform->key.length,
2610 RTE_CACHE_LINE_SIZE);
2611 if (session->auth_key.data == NULL &&
2612 auth_xform->key.length > 0) {
2613 DPAA_SEC_ERR("No Memory for auth key");
2616 session->auth_key.length = auth_xform->key.length;
2617 memcpy(session->auth_key.data, auth_xform->key.data,
2618 auth_xform->key.length);
2619 session->auth_alg = auth_xform->algo;
2620 session->digest_length = auth_xform->digest_length;
2622 session->auth_key.data = NULL;
2623 session->auth_key.length = 0;
2624 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2627 switch (session->auth_alg) {
2628 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2629 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2630 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2632 case RTE_CRYPTO_AUTH_MD5_HMAC:
2633 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2634 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2636 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2637 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2638 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2639 if (session->digest_length != 16)
2640 DPAA_SEC_WARN(
2641 "Using SHA256-HMAC with a truncated digest length is non-standard;"
2642 " it will not work with lookaside protocol offload");
2644 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2645 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2646 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2648 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2649 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2650 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2652 case RTE_CRYPTO_AUTH_AES_CMAC:
2653 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2655 case RTE_CRYPTO_AUTH_NULL:
2656 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2658 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2659 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2660 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2661 case RTE_CRYPTO_AUTH_SHA1:
2662 case RTE_CRYPTO_AUTH_SHA256:
2663 case RTE_CRYPTO_AUTH_SHA512:
2664 case RTE_CRYPTO_AUTH_SHA224:
2665 case RTE_CRYPTO_AUTH_SHA384:
2666 case RTE_CRYPTO_AUTH_MD5:
2667 case RTE_CRYPTO_AUTH_AES_GMAC:
2668 case RTE_CRYPTO_AUTH_KASUMI_F9:
2669 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2670 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2671 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2675 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2680 switch (session->cipher_alg) {
2681 case RTE_CRYPTO_CIPHER_AES_CBC:
2682 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2683 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2685 case RTE_CRYPTO_CIPHER_3DES_CBC:
2686 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2687 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2689 case RTE_CRYPTO_CIPHER_AES_CTR:
2690 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2691 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2692 if (session->dir == DIR_ENC) {
2693 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2694 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2696 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2697 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2700 case RTE_CRYPTO_CIPHER_NULL:
2701 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2703 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2704 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2705 case RTE_CRYPTO_CIPHER_3DES_ECB:
2706 case RTE_CRYPTO_CIPHER_AES_ECB:
2707 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2708 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2709 session->cipher_alg);
2710 return -ENOTSUP;
2711 default:
2712 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2713 session->cipher_alg);
2721 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2722 struct rte_security_session_conf *conf,
2725 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2726 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2727 struct rte_crypto_auth_xform *auth_xform = NULL;
2728 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2729 struct rte_crypto_aead_xform *aead_xform = NULL;
2730 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2734 PMD_INIT_FUNC_TRACE();
2736 memset(session, 0, sizeof(dpaa_sec_session));
2737 session->proto_alg = conf->protocol;
2738 session->ctxt = DPAA_SEC_IPSEC;
2740 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2741 session->dir = DIR_ENC;
2743 session->dir = DIR_DEC;
2745 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2746 cipher_xform = &conf->crypto_xform->cipher;
2747 if (conf->crypto_xform->next)
2748 auth_xform = &conf->crypto_xform->next->auth;
2749 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2750 ipsec_xform, session);
2751 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2752 auth_xform = &conf->crypto_xform->auth;
2753 if (conf->crypto_xform->next)
2754 cipher_xform = &conf->crypto_xform->next->cipher;
2755 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2756 ipsec_xform, session);
2757 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2758 aead_xform = &conf->crypto_xform->aead;
2759 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2760 ipsec_xform, session);
2762 DPAA_SEC_ERR("XFORM not specified");
2767 DPAA_SEC_ERR("Failed to process xform");
2771 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2772 if (ipsec_xform->tunnel.type ==
2773 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2774 session->ip4_hdr.ip_v = IPVERSION;
2775 session->ip4_hdr.ip_hl = 5;
2776 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2777 sizeof(session->ip4_hdr));
2778 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2779 session->ip4_hdr.ip_id = 0;
2780 session->ip4_hdr.ip_off = 0;
2781 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2782 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2783 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2784 IPPROTO_ESP : IPPROTO_AH;
2785 session->ip4_hdr.ip_sum = 0;
2786 session->ip4_hdr.ip_src =
2787 ipsec_xform->tunnel.ipv4.src_ip;
2788 session->ip4_hdr.ip_dst =
2789 ipsec_xform->tunnel.ipv4.dst_ip;
2790 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2791 (void *)&session->ip4_hdr,
2793 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2794 } else if (ipsec_xform->tunnel.type ==
2795 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2796 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2797 DPAA_IPv6_DEFAULT_VTC_FLOW |
2798 ((ipsec_xform->tunnel.ipv6.dscp <<
2799 RTE_IPV6_HDR_TC_SHIFT) &
2800 RTE_IPV6_HDR_TC_MASK) |
2801 ((ipsec_xform->tunnel.ipv6.flabel <<
2802 RTE_IPV6_HDR_FL_SHIFT) &
2803 RTE_IPV6_HDR_FL_MASK));
2804 /* Payload length will be updated by HW */
2805 session->ip6_hdr.payload_len = 0;
2806 session->ip6_hdr.hop_limits =
2807 ipsec_xform->tunnel.ipv6.hlimit;
2808 session->ip6_hdr.proto = (ipsec_xform->proto ==
2809 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2810 IPPROTO_ESP : IPPROTO_AH;
2811 memcpy(&session->ip6_hdr.src_addr,
2812 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2813 memcpy(&session->ip6_hdr.dst_addr,
2814 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2815 session->encap_pdb.ip_hdr_len =
2816 sizeof(struct rte_ipv6_hdr);
2818 session->encap_pdb.options =
2819 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2820 PDBOPTS_ESP_OIHI_PDB_INL |
2821 PDBOPTS_ESP_IVSRC |
2822 PDBHMO_ESP_ENCAP_DTTL |
2823 PDBHMO_ESP_SNR;
2824 if (ipsec_xform->options.esn)
2825 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2826 session->encap_pdb.spi = ipsec_xform->spi;
2828 } else if (ipsec_xform->direction ==
2829 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2830 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2831 session->decap_pdb.options = sizeof(struct ip) << 16;
2833 session->decap_pdb.options =
2834 sizeof(struct rte_ipv6_hdr) << 16;
2835 if (ipsec_xform->options.esn)
2836 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2839 rte_spinlock_lock(&internals->lock);
2840 for (i = 0; i < MAX_DPAA_CORES; i++) {
2841 session->inq[i] = dpaa_sec_attach_rxq(internals);
2842 if (session->inq[i] == NULL) {
2843 DPAA_SEC_ERR("unable to attach sec queue");
2844 rte_spinlock_unlock(&internals->lock);
2848 rte_spinlock_unlock(&internals->lock);
2852 rte_free(session->auth_key.data);
2853 rte_free(session->cipher_key.data);
2854 memset(session, 0, sizeof(dpaa_sec_session));
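/*
 * Illustrative sketch (not part of the driver): creating a lookaside IPsec
 * session that is processed by dpaa_sec_set_ipsec_session() above. dev_id,
 * sess_mp and the xform chain are assumptions for the example.
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead,
 *	};
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp);
 */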
2859 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2860 struct rte_security_session_conf *conf,
2863 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2864 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2865 struct rte_crypto_auth_xform *auth_xform = NULL;
2866 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2867 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2868 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2871 PMD_INIT_FUNC_TRACE();
2873 memset(session, 0, sizeof(dpaa_sec_session));
2875 /* find xform types */
2876 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2877 cipher_xform = &xform->cipher;
2878 if (xform->next != NULL)
2879 auth_xform = &xform->next->auth;
2880 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2881 auth_xform = &xform->auth;
2882 if (xform->next != NULL)
2883 cipher_xform = &xform->next->cipher;
2885 DPAA_SEC_ERR("Invalid crypto type");
2889 session->proto_alg = conf->protocol;
2890 session->ctxt = DPAA_SEC_PDCP;
2893 switch (cipher_xform->algo) {
2894 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2895 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2897 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2898 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2900 case RTE_CRYPTO_CIPHER_AES_CTR:
2901 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2903 case RTE_CRYPTO_CIPHER_NULL:
2904 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2907 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2908 cipher_xform->algo);
2912 session->cipher_key.data = rte_zmalloc(NULL,
2913 cipher_xform->key.length,
2914 RTE_CACHE_LINE_SIZE);
2915 if (session->cipher_key.data == NULL &&
2916 cipher_xform->key.length > 0) {
2917 DPAA_SEC_ERR("No Memory for cipher key");
2920 session->cipher_key.length = cipher_xform->key.length;
2921 memcpy(session->cipher_key.data, cipher_xform->key.data,
2922 cipher_xform->key.length);
2923 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2925 session->cipher_alg = cipher_xform->algo;
2927 session->cipher_key.data = NULL;
2928 session->cipher_key.length = 0;
2929 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2930 session->dir = DIR_ENC;
2933 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2934 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2935 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2937 "PDCP Seq Num size should be 5/12 bits for cmode");
2943 switch (auth_xform->algo) {
2944 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2945 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2947 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2948 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2950 case RTE_CRYPTO_AUTH_AES_CMAC:
2951 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2953 case RTE_CRYPTO_AUTH_NULL:
2954 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2957 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2959 rte_free(session->cipher_key.data);
2962 session->auth_key.data = rte_zmalloc(NULL,
2963 auth_xform->key.length,
2964 RTE_CACHE_LINE_SIZE);
2965 if (!session->auth_key.data &&
2966 auth_xform->key.length > 0) {
2967 DPAA_SEC_ERR("No Memory for auth key");
2968 rte_free(session->cipher_key.data);
2971 session->auth_key.length = auth_xform->key.length;
2972 memcpy(session->auth_key.data, auth_xform->key.data,
2973 auth_xform->key.length);
2974 session->auth_alg = auth_xform->algo;
2976 session->auth_key.data = NULL;
2977 session->auth_key.length = 0;
2978 session->auth_alg = 0;
2980 session->pdcp.domain = pdcp_xform->domain;
2981 session->pdcp.bearer = pdcp_xform->bearer;
2982 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2983 session->pdcp.sn_size = pdcp_xform->sn_size;
2984 session->pdcp.hfn = pdcp_xform->hfn;
2985 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2986 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2987 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2989 rte_spinlock_lock(&dev_priv->lock);
2990 for (i = 0; i < MAX_DPAA_CORES; i++) {
2991 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2992 if (session->inq[i] == NULL) {
2993 DPAA_SEC_ERR("unable to attach sec queue");
2994 rte_spinlock_unlock(&dev_priv->lock);
2998 rte_spinlock_unlock(&dev_priv->lock);
3001 rte_free(session->auth_key.data);
3002 rte_free(session->cipher_key.data);
3003 memset(session, 0, sizeof(dpaa_sec_session));
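/*
 * Illustrative sketch (not part of the driver): the PDCP counterpart,
 * handled by dpaa_sec_set_pdcp_session() above. All field values are
 * placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.hfn = 1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */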
3008 dpaa_sec_security_session_create(void *dev,
3009 struct rte_security_session_conf *conf,
3010 struct rte_security_session *sess,
3011 struct rte_mempool *mempool)
3013 void *sess_private_data;
3014 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3017 if (rte_mempool_get(mempool, &sess_private_data)) {
3018 DPAA_SEC_ERR("Couldn't get object from session mempool");
3022 switch (conf->protocol) {
3023 case RTE_SECURITY_PROTOCOL_IPSEC:
3024 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3025 sess_private_data);
3026 break;
3027 case RTE_SECURITY_PROTOCOL_PDCP:
3028 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3029 sess_private_data);
3030 break;
3031 case RTE_SECURITY_PROTOCOL_MACSEC:
3032 return -ENOTSUP;
3037 DPAA_SEC_ERR("failed to configure session parameters");
3038 /* Return session to mempool */
3039 rte_mempool_put(mempool, sess_private_data);
3043 set_sec_session_private_data(sess, sess_private_data);
3048 /** Clear the session memory so that it doesn't leave key material behind */
3050 dpaa_sec_security_session_destroy(void *dev,
3051 struct rte_security_session *sess)
3053 PMD_INIT_FUNC_TRACE();
3054 void *sess_priv = get_sec_session_private_data(sess);
3055 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3058 free_session_memory((struct rte_cryptodev *)dev, s);
3059 set_sec_session_private_data(sess, NULL);
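/*
 * Illustrative usage (assumption, mirroring the create sketch earlier):
 *
 *	rte_security_session_destroy(ctx, sec_sess);
 *
 * which detaches the per-core RX queues and zeroes the key material via
 * free_session_memory() above.
 */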
3065 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3066 struct rte_cryptodev_config *config __rte_unused)
3068 PMD_INIT_FUNC_TRACE();
3074 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3076 PMD_INIT_FUNC_TRACE();
3081 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3083 PMD_INIT_FUNC_TRACE();
3087 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3089 PMD_INIT_FUNC_TRACE();
3098 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3099 struct rte_cryptodev_info *info)
3101 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3103 PMD_INIT_FUNC_TRACE();
3105 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3106 info->feature_flags = dev->feature_flags;
3107 info->capabilities = dpaa_sec_capabilities;
3108 info->sym.max_nb_sessions = internals->max_nb_sessions;
3109 info->driver_id = cryptodev_driver_id;
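/*
 * Illustrative sketch (not part of the driver): how an application reads
 * back the limits reported above; dev_id and nb_workers are assumptions.
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	uint16_t nb_qps = RTE_MIN(info.max_nb_queue_pairs, nb_workers);
 */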
3113 static enum qman_cb_dqrr_result
3114 dpaa_sec_process_parallel_event(void *event,
3115 struct qman_portal *qm __always_unused,
3116 struct qman_fq *outq,
3117 const struct qm_dqrr_entry *dqrr,
3120 const struct qm_fd *fd;
3121 struct dpaa_sec_job *job;
3122 struct dpaa_sec_op_ctx *ctx;
3123 struct rte_event *ev = (struct rte_event *)event;
3127 /* sg is embedded in an op ctx,
3128 * sg[0] is for output, sg[1] is for input */
3131 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3133 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3134 ctx->fd_status = fd->status;
3135 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3136 struct qm_sg_entry *sg_out;
3139 sg_out = &job->sg[0];
3140 hw_sg_to_cpu(sg_out);
3141 len = sg_out->length;
3142 ctx->op->sym->m_src->pkt_len = len;
3143 ctx->op->sym->m_src->data_len = len;
3145 if (!ctx->fd_status) {
3146 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3148 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3149 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3151 ev->event_ptr = (void *)ctx->op;
3153 ev->flow_id = outq->ev.flow_id;
3154 ev->sub_event_type = outq->ev.sub_event_type;
3155 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3156 ev->op = RTE_EVENT_OP_NEW;
3157 ev->sched_type = outq->ev.sched_type;
3158 ev->queue_id = outq->ev.queue_id;
3159 ev->priority = outq->ev.priority;
3160 *bufs = (void *)ctx->op;
3162 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3164 return qman_cb_dqrr_consume;
3167 static enum qman_cb_dqrr_result
3168 dpaa_sec_process_atomic_event(void *event,
3169 struct qman_portal *qm __rte_unused,
3170 struct qman_fq *outq,
3171 const struct qm_dqrr_entry *dqrr,
3175 const struct qm_fd *fd;
3176 struct dpaa_sec_job *job;
3177 struct dpaa_sec_op_ctx *ctx;
3178 struct rte_event *ev = (struct rte_event *)event;
3182 /* sg is embedded in an op ctx,
3183 * sg[0] is for output, sg[1] is for input */
3186 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
3188 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3189 ctx->fd_status = fd->status;
3190 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3191 struct qm_sg_entry *sg_out;
3194 sg_out = &job->sg[0];
3195 hw_sg_to_cpu(sg_out);
3196 len = sg_out->length;
3197 ctx->op->sym->m_src->pkt_len = len;
3198 ctx->op->sym->m_src->data_len = len;
3200 if (!ctx->fd_status) {
3201 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3203 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3204 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3206 ev->event_ptr = (void *)ctx->op;
3207 ev->flow_id = outq->ev.flow_id;
3208 ev->sub_event_type = outq->ev.sub_event_type;
3209 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3210 ev->op = RTE_EVENT_OP_NEW;
3211 ev->sched_type = outq->ev.sched_type;
3212 ev->queue_id = outq->ev.queue_id;
3213 ev->priority = outq->ev.priority;
3215 /* Save active dqrr entries */
3216 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3217 DPAA_PER_LCORE_DQRR_SIZE++;
3218 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3219 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3220 ev->impl_opaque = index + 1;
3221 ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3222 *bufs = (void *)ctx->op;
3224 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3226 return qman_cb_dqrr_defer;
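/*
 * Illustrative sketch (not part of the driver): both callbacks above hand
 * finished crypto ops to the application through an event device, so the
 * consumer side looks roughly like this (evdev_id and port are assumptions):
 *
 *	struct rte_event ev;
 *	while (rte_event_dequeue_burst(evdev_id, port, &ev, 1, 0)) {
 *		struct rte_crypto_op *op = ev.event_ptr;
 *		// for atomic queues the DQRR slot stays held until the
 *		// mbuf (op->sym->m_src) is released or forwarded
 *	}
 */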
3230 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3233 const struct rte_event *event)
3235 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3236 struct qm_mcc_initfq opts = {0};
3240 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3241 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3242 opts.fqd.dest.channel = ch_id;
3244 switch (event->sched_type) {
3245 case RTE_SCHED_TYPE_ATOMIC:
3246 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3247 /* Reset the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3248 * configuration when combined with the HOLD_ACTIVE setting.
3249 */
3250 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3251 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3253 case RTE_SCHED_TYPE_ORDERED:
3254 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3257 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3258 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3262 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3263 if (unlikely(ret)) {
3264 DPAA_SEC_ERR("unable to init caam source fq!");
3268 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3274 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3277 struct qm_mcc_initfq opts = {0};
3279 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3281 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3282 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3283 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3284 qp->outq.cb.ern = ern_sec_fq_handler;
3285 qman_retire_fq(&qp->outq, NULL);
3286 qman_oos_fq(&qp->outq);
3287 ret = qman_init_fq(&qp->outq, 0, &opts);
3289 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3290 qp->outq.cb.dqrr = NULL;
3295 static struct rte_cryptodev_ops crypto_ops = {
3296 .dev_configure = dpaa_sec_dev_configure,
3297 .dev_start = dpaa_sec_dev_start,
3298 .dev_stop = dpaa_sec_dev_stop,
3299 .dev_close = dpaa_sec_dev_close,
3300 .dev_infos_get = dpaa_sec_dev_infos_get,
3301 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3302 .queue_pair_release = dpaa_sec_queue_pair_release,
3303 .queue_pair_count = dpaa_sec_queue_pair_count,
3304 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3305 .sym_session_configure = dpaa_sec_sym_session_configure,
3306 .sym_session_clear = dpaa_sec_sym_session_clear
3309 #ifdef RTE_LIBRTE_SECURITY
3310 static const struct rte_security_capability *
3311 dpaa_sec_capabilities_get(void *device __rte_unused)
3313 return dpaa_sec_security_cap;
3316 static const struct rte_security_ops dpaa_sec_security_ops = {
3317 .session_create = dpaa_sec_security_session_create,
3318 .session_update = NULL,
3319 .session_stats_get = NULL,
3320 .session_destroy = dpaa_sec_security_session_destroy,
3321 .set_pkt_metadata = NULL,
3322 .capabilities_get = dpaa_sec_capabilities_get
3326 dpaa_sec_uninit(struct rte_cryptodev *dev)
3328 struct dpaa_sec_dev_private *internals;
3333 internals = dev->data->dev_private;
3334 rte_free(dev->security_ctx);
3336 rte_free(internals);
3338 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3339 dev->data->name, rte_socket_id());
3345 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3347 struct dpaa_sec_dev_private *internals;
3348 #ifdef RTE_LIBRTE_SECURITY
3349 struct rte_security_ctx *security_instance;
3351 struct dpaa_sec_qp *qp;
3355 PMD_INIT_FUNC_TRACE();
3357 cryptodev->driver_id = cryptodev_driver_id;
3358 cryptodev->dev_ops = &crypto_ops;
3360 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3361 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3362 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3363 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3364 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3365 RTE_CRYPTODEV_FF_SECURITY |
3366 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3367 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3368 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3369 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3370 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3372 internals = cryptodev->data->dev_private;
3373 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3374 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3377 * For secondary processes, we don't initialise any further as primary
3378 * has already done this work. Only check that we don't need a
3379 * different RX function.
3380 */
3381 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3382 DPAA_SEC_WARN("Device already init by primary process");
3385 #ifdef RTE_LIBRTE_SECURITY
3386 /* Initialize security_ctx only for primary process*/
3387 security_instance = rte_malloc("rte_security_instances_ops",
3388 sizeof(struct rte_security_ctx), 0);
3389 if (security_instance == NULL)
3391 security_instance->device = (void *)cryptodev;
3392 security_instance->ops = &dpaa_sec_security_ops;
3393 security_instance->sess_cnt = 0;
3394 cryptodev->security_ctx = security_instance;
3396 rte_spinlock_init(&internals->lock);
3397 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3398 /* init qman fq for queue pair */
3399 qp = &internals->qps[i];
3400 ret = dpaa_sec_init_tx(&qp->outq);
3402 DPAA_SEC_ERR("config tx of queue pair %d", i);
3407 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3408 QMAN_FQ_FLAG_TO_DCPORTAL;
3409 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3410 /* create rx qman fq for sessions*/
3411 ret = qman_create_fq(0, flags, &internals->inq[i]);
3412 if (unlikely(ret != 0)) {
3413 DPAA_SEC_ERR("sec qman_create_fq failed");
3418 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3422 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3424 dpaa_sec_uninit(cryptodev);
3429 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3430 struct rte_dpaa_device *dpaa_dev)
3432 struct rte_cryptodev *cryptodev;
3433 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3437 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3439 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3440 if (cryptodev == NULL)
3443 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3444 cryptodev->data->dev_private = rte_zmalloc_socket(
3445 "cryptodev private structure",
3446 sizeof(struct dpaa_sec_dev_private),
3447 RTE_CACHE_LINE_SIZE,
3450 if (cryptodev->data->dev_private == NULL)
3451 rte_panic("Cannot allocate memzone for private "
3455 dpaa_dev->crypto_dev = cryptodev;
3456 cryptodev->device = &dpaa_dev->device;
3458 /* init user callbacks */
3459 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3461 /* if sec device version is not configured */
3462 if (!rta_get_sec_era()) {
3463 const struct device_node *caam_node;
3465 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3466 const uint32_t *prop = of_get_property(caam_node,
3467 "fsl,sec-era", NULL);
3469 if (prop)
3471 rta_set_sec_era(INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3477 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
3478 retval = rte_dpaa_portal_init((void *)1);
3480 DPAA_SEC_ERR("Unable to initialize portal");
3485 /* Invoke PMD device initialization function */
3486 retval = dpaa_sec_dev_init(cryptodev);
3490 /* In case of error, cleanup is done */
3491 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3492 rte_free(cryptodev->data->dev_private);
3494 rte_cryptodev_pmd_release_device(cryptodev);
3500 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3502 struct rte_cryptodev *cryptodev;
3505 cryptodev = dpaa_dev->crypto_dev;
3506 if (cryptodev == NULL)
3509 ret = dpaa_sec_uninit(cryptodev);
3513 return rte_cryptodev_pmd_destroy(cryptodev);
3516 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3517 .drv_type = FSL_DPAA_CRYPTO,
3519 .name = "DPAA SEC PMD"
3521 .probe = cryptodev_dpaa_sec_probe,
3522 .remove = cryptodev_dpaa_sec_remove,
3525 static struct cryptodev_driver dpaa_sec_crypto_drv;
3527 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3528 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3529 cryptodev_driver_id);
3531 RTE_INIT(dpaa_sec_init_log)
3533 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
3534 if (dpaa_logtype_sec >= 0)
3535 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
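/*
 * Note (usage example): the log type registered above can be raised at
 * runtime with the EAL option, e.g.
 *
 *	--log-level=pmd.crypto.dpaa:debug
 */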