1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIBRTE_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
34 /* RTA header files */
35 #include <desc/common.h>
36 #include <desc/algo.h>
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
40 #include <rte_dpaa_bus.h>
42 #include <dpaa_sec_event.h>
43 #include <dpaa_sec_log.h>
44 #include <dpaax_iova_table.h>
46 enum rta_sec_era rta_sec_era;
50 static uint8_t cryptodev_driver_id;
52 static __thread struct rte_crypto_op **dpaa_sec_ops;
53 static __thread int dpaa_sec_op_nb;
56 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
59 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
61 if (!ctx->fd_status) {
62 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
64 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
65 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
69 static inline struct dpaa_sec_op_ctx *
70 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
72 struct dpaa_sec_op_ctx *ctx;
75 retval = rte_mempool_get(
76 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
79 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
83 * Clear SG memory. There are 16 SG entries of 16 bytes each.
84 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
85 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
86 * for each packet, memset() is costlier than dcbz_64().
88 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
89 dcbz_64(&ctx->job.sg[i]);
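	/*
	 * A worked example of the loop step (a note, not new behaviour):
	 * dcbz_64() zeroes one 64-byte cache line, i.e. 4 of the 16-byte
	 * SG entries per call, which is why i advances by 4. Clearing the
	 * full 16-entry table is 16 * 16 B = 256 B, or 4 dcbz_64() calls.
	 */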
91 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
92 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
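	/*
	 * A hypothetical use of vtop_offset (sketch only, not code from
	 * this file): any address p inside ctx can be translated with one
	 * subtraction,
	 *   iova = (rte_iova_t)((size_t)p - ctx->vtop_offset);
	 * avoiding a memseg or IOVA-table lookup on the fast path.
	 */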
97 static inline rte_iova_t
98 dpaa_mem_vtop(void *vaddr)
100 const struct rte_memseg *ms;
102 ms = rte_mem_virt2memseg(vaddr, NULL);
104 dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
105 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
111 dpaa_mem_ptov(rte_iova_t paddr)
115 va = (void *)dpaax_iova_table_get_va(paddr);
119 return rte_mem_iova2virt(paddr);
123 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
125 const struct qm_mr_entry *msg)
127 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
128 fq->fqid, msg->ern.rc, msg->ern.seqnum);
131 /* Initialize the queue with destination channel as the CAAM channel
132 * so that all packets in this queue can be dispatched to CAAM.
135 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
138 struct qm_mcc_initfq fq_opts;
142 /* Clear FQ options */
143 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
145 flags = QMAN_INITFQ_FLAG_SCHED;
146 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
147 QM_INITFQ_WE_CONTEXTB;
149 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
150 fq_opts.fqd.context_b = fqid_out;
151 fq_opts.fqd.dest.channel = qm_channel_caam;
152 fq_opts.fqd.dest.wq = 0;
154 fq_in->cb.ern = ern_sec_fq_handler;
156 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
158 ret = qman_init_fq(fq_in, flags, &fq_opts);
159 if (unlikely(ret != 0))
160 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
165 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
166 static enum qman_cb_dqrr_result
167 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
168 struct qman_fq *fq __always_unused,
169 const struct qm_dqrr_entry *dqrr)
171 const struct qm_fd *fd;
172 struct dpaa_sec_job *job;
173 struct dpaa_sec_op_ctx *ctx;
175 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
176 return qman_cb_dqrr_defer;
178 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
179 return qman_cb_dqrr_consume;
182 /* sg is embedded in an op ctx,
183 * sg[0] is for output
186 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
188 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
189 ctx->fd_status = fd->status;
190 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
191 struct qm_sg_entry *sg_out;
193 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
194 ctx->op->sym->m_src : ctx->op->sym->m_dst;
196 sg_out = &job->sg[0];
197 hw_sg_to_cpu(sg_out);
198 len = sg_out->length;
200 while (mbuf->next != NULL) {
201 len -= mbuf->data_len;
204 mbuf->data_len = len;
206 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
207 dpaa_sec_op_ending(ctx);
209 return qman_cb_dqrr_consume;
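/*
 * Compound frame layout assumed throughout this file (a sketch; sg[0]'s
 * role is stated above, sg[1]'s follows from the build_* helpers below):
 *
 *   fd.addr -> job->sg[0]  output (extension bit set for SG chains)
 *              job->sg[1]  input  (final bit set)
 *
 * job is embedded in a dpaa_sec_op_ctx, so container_of() on the frame
 * address recovers the per-op context, as done in the handlers above.
 */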
212 /* CAAM results are put into this queue */
214 dpaa_sec_init_tx(struct qman_fq *fq)
217 struct qm_mcc_initfq opts;
220 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
221 QMAN_FQ_FLAG_DYNAMIC_FQID;
223 ret = qman_create_fq(0, flags, fq);
225 DPAA_SEC_ERR("qman_create_fq failed");
229 memset(&opts, 0, sizeof(opts));
230 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
231 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
233 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
235 fq->cb.dqrr = dqrr_out_fq_cb_rx;
236 fq->cb.ern = ern_sec_fq_handler;
238 ret = qman_init_fq(fq, 0, &opts);
240 DPAA_SEC_ERR("unable to init caam source fq!");
247 static inline int is_encode(dpaa_sec_session *ses)
249 return ses->dir == DIR_ENC;
252 static inline int is_decode(dpaa_sec_session *ses)
254 return ses->dir == DIR_DEC;
257 #ifdef RTE_LIBRTE_SECURITY
259 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
261 struct alginfo authdata = {0}, cipherdata = {0};
262 struct sec_cdb *cdb = &ses->cdb;
263 struct alginfo *p_authdata = NULL;
264 int32_t shared_desc_len = 0;
266 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
272 cipherdata.key = (size_t)ses->cipher_key.data;
273 cipherdata.keylen = ses->cipher_key.length;
274 cipherdata.key_enc_flags = 0;
275 cipherdata.key_type = RTA_DATA_IMM;
276 cipherdata.algtype = ses->cipher_key.alg;
277 cipherdata.algmode = ses->cipher_key.algmode;
279 cdb->sh_desc[0] = cipherdata.keylen;
284 authdata.key = (size_t)ses->auth_key.data;
285 authdata.keylen = ses->auth_key.length;
286 authdata.key_enc_flags = 0;
287 authdata.key_type = RTA_DATA_IMM;
288 authdata.algtype = ses->auth_key.alg;
289 authdata.algmode = ses->auth_key.algmode;
291 p_authdata = &authdata;
293 cdb->sh_desc[1] = authdata.keylen;
296 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
298 (unsigned int *)cdb->sh_desc,
299 &cdb->sh_desc[2], 2);
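	/*
	 * A note on rta_inline_query() as used here: bit 0 of sh_desc[2]
	 * reports whether the cipher key fits inline in the shared
	 * descriptor and bit 1 the auth key; a cleared bit means the key
	 * must be referenced by pointer (RTA_DATA_PTR) instead of being
	 * embedded as immediate data, which is what the checks below do.
	 */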
301 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
305 if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
307 (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
308 cipherdata.key_type = RTA_DATA_PTR;
310 if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
312 (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
313 authdata.key_type = RTA_DATA_PTR;
320 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
321 if (ses->dir == DIR_ENC)
322 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
323 cdb->sh_desc, 1, swap,
328 ses->pdcp.hfn_threshold,
329 &cipherdata, &authdata,
331 else if (ses->dir == DIR_DEC)
332 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
333 cdb->sh_desc, 1, swap,
338 ses->pdcp.hfn_threshold,
339 &cipherdata, &authdata,
342 if (ses->dir == DIR_ENC)
343 shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
344 cdb->sh_desc, 1, swap,
349 ses->pdcp.hfn_threshold,
350 &cipherdata, p_authdata, 0);
351 else if (ses->dir == DIR_DEC)
352 shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
353 cdb->sh_desc, 1, swap,
358 ses->pdcp.hfn_threshold,
359 &cipherdata, p_authdata, 0);
361 return shared_desc_len;
364 /* Prepare the IPsec proto command block for the session */
366 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
368 struct alginfo cipherdata = {0}, authdata = {0};
369 struct sec_cdb *cdb = &ses->cdb;
370 int32_t shared_desc_len = 0;
372 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378 cipherdata.key = (size_t)ses->cipher_key.data;
379 cipherdata.keylen = ses->cipher_key.length;
380 cipherdata.key_enc_flags = 0;
381 cipherdata.key_type = RTA_DATA_IMM;
382 cipherdata.algtype = ses->cipher_key.alg;
383 cipherdata.algmode = ses->cipher_key.algmode;
385 authdata.key = (size_t)ses->auth_key.data;
386 authdata.keylen = ses->auth_key.length;
387 authdata.key_enc_flags = 0;
388 authdata.key_type = RTA_DATA_IMM;
389 authdata.algtype = ses->auth_key.alg;
390 authdata.algmode = ses->auth_key.algmode;
392 cdb->sh_desc[0] = cipherdata.keylen;
393 cdb->sh_desc[1] = authdata.keylen;
394 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
396 (unsigned int *)cdb->sh_desc,
397 &cdb->sh_desc[2], 2);
400 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
403 if (cdb->sh_desc[2] & 1)
404 cipherdata.key_type = RTA_DATA_IMM;
406 cipherdata.key = (size_t)dpaa_mem_vtop(
407 (void *)(size_t)cipherdata.key);
408 cipherdata.key_type = RTA_DATA_PTR;
410 if (cdb->sh_desc[2] & (1<<1))
411 authdata.key_type = RTA_DATA_IMM;
413 authdata.key = (size_t)dpaa_mem_vtop(
414 (void *)(size_t)authdata.key);
415 authdata.key_type = RTA_DATA_PTR;
421 if (ses->dir == DIR_ENC) {
422 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
424 true, swap, SHR_SERIAL,
426 (uint8_t *)&ses->ip4_hdr,
427 &cipherdata, &authdata);
428 } else if (ses->dir == DIR_DEC) {
429 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
431 true, swap, SHR_SERIAL,
433 &cipherdata, &authdata);
435 return shared_desc_len;
438 /* Prepare the command block for the session */
440 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
442 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
443 int32_t shared_desc_len = 0;
444 struct sec_cdb *cdb = &ses->cdb;
446 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
452 memset(cdb, 0, sizeof(struct sec_cdb));
455 #ifdef RTE_LIBRTE_SECURITY
457 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
460 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
463 case DPAA_SEC_CIPHER:
464 alginfo_c.key = (size_t)ses->cipher_key.data;
465 alginfo_c.keylen = ses->cipher_key.length;
466 alginfo_c.key_enc_flags = 0;
467 alginfo_c.key_type = RTA_DATA_IMM;
468 alginfo_c.algtype = ses->cipher_key.alg;
469 alginfo_c.algmode = ses->cipher_key.algmode;
471 switch (ses->cipher_alg) {
472 case RTE_CRYPTO_CIPHER_AES_CBC:
473 case RTE_CRYPTO_CIPHER_3DES_CBC:
474 case RTE_CRYPTO_CIPHER_AES_CTR:
475 case RTE_CRYPTO_CIPHER_3DES_CTR:
476 shared_desc_len = cnstr_shdsc_blkcipher(
478 swap, SHR_NEVER, &alginfo_c,
483 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
484 shared_desc_len = cnstr_shdsc_snow_f8(
485 cdb->sh_desc, true, swap,
489 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
490 shared_desc_len = cnstr_shdsc_zuce(
491 cdb->sh_desc, true, swap,
496 DPAA_SEC_ERR("unsupported cipher alg %d",
502 alginfo_a.key = (size_t)ses->auth_key.data;
503 alginfo_a.keylen = ses->auth_key.length;
504 alginfo_a.key_enc_flags = 0;
505 alginfo_a.key_type = RTA_DATA_IMM;
506 alginfo_a.algtype = ses->auth_key.alg;
507 alginfo_a.algmode = ses->auth_key.algmode;
508 switch (ses->auth_alg) {
509 case RTE_CRYPTO_AUTH_MD5_HMAC:
510 case RTE_CRYPTO_AUTH_SHA1_HMAC:
511 case RTE_CRYPTO_AUTH_SHA224_HMAC:
512 case RTE_CRYPTO_AUTH_SHA256_HMAC:
513 case RTE_CRYPTO_AUTH_SHA384_HMAC:
514 case RTE_CRYPTO_AUTH_SHA512_HMAC:
515 shared_desc_len = cnstr_shdsc_hmac(
517 swap, SHR_NEVER, &alginfo_a,
521 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
522 shared_desc_len = cnstr_shdsc_snow_f9(
523 cdb->sh_desc, true, swap,
528 case RTE_CRYPTO_AUTH_ZUC_EIA3:
529 shared_desc_len = cnstr_shdsc_zuca(
530 cdb->sh_desc, true, swap,
536 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
540 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
541 DPAA_SEC_ERR("not supported aead alg");
544 alginfo.key = (size_t)ses->aead_key.data;
545 alginfo.keylen = ses->aead_key.length;
546 alginfo.key_enc_flags = 0;
547 alginfo.key_type = RTA_DATA_IMM;
548 alginfo.algtype = ses->aead_key.alg;
549 alginfo.algmode = ses->aead_key.algmode;
551 if (ses->dir == DIR_ENC)
552 shared_desc_len = cnstr_shdsc_gcm_encap(
553 cdb->sh_desc, true, swap, SHR_NEVER,
558 shared_desc_len = cnstr_shdsc_gcm_decap(
559 cdb->sh_desc, true, swap, SHR_NEVER,
564 case DPAA_SEC_CIPHER_HASH:
565 alginfo_c.key = (size_t)ses->cipher_key.data;
566 alginfo_c.keylen = ses->cipher_key.length;
567 alginfo_c.key_enc_flags = 0;
568 alginfo_c.key_type = RTA_DATA_IMM;
569 alginfo_c.algtype = ses->cipher_key.alg;
570 alginfo_c.algmode = ses->cipher_key.algmode;
572 alginfo_a.key = (size_t)ses->auth_key.data;
573 alginfo_a.keylen = ses->auth_key.length;
574 alginfo_a.key_enc_flags = 0;
575 alginfo_a.key_type = RTA_DATA_IMM;
576 alginfo_a.algtype = ses->auth_key.alg;
577 alginfo_a.algmode = ses->auth_key.algmode;
579 cdb->sh_desc[0] = alginfo_c.keylen;
580 cdb->sh_desc[1] = alginfo_a.keylen;
581 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
583 (unsigned int *)cdb->sh_desc,
584 &cdb->sh_desc[2], 2);
587 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
590 if (cdb->sh_desc[2] & 1)
591 alginfo_c.key_type = RTA_DATA_IMM;
593 alginfo_c.key = (size_t)dpaa_mem_vtop(
594 (void *)(size_t)alginfo_c.key);
595 alginfo_c.key_type = RTA_DATA_PTR;
597 if (cdb->sh_desc[2] & (1<<1))
598 alginfo_a.key_type = RTA_DATA_IMM;
600 alginfo_a.key = (size_t)dpaa_mem_vtop(
601 (void *)(size_t)alginfo_a.key);
602 alginfo_a.key_type = RTA_DATA_PTR;
607 /* Auth_only_len is set to 0 here and will be
608 * overwritten in the FD for each packet.
610 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
611 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
613 ses->digest_length, ses->dir);
615 case DPAA_SEC_HASH_CIPHER:
617 DPAA_SEC_ERR("error: Unsupported session");
621 if (shared_desc_len < 0) {
622 DPAA_SEC_ERR("error in preparing command block");
623 return shared_desc_len;
626 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
627 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
628 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
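	/*
	 * A note: the SEC block parses the CDB header as big-endian, hence
	 * the rte_cpu_to_be_32() conversions above (they are no-ops on
	 * big-endian hosts).
	 */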
633 /* The qp is lockless; it must be accessed by only one thread at a time */
635 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
638 unsigned int pkts = 0;
639 int num_rx_bufs, ret;
640 struct qm_dqrr_entry *dq;
641 uint32_t vdqcr_flags = 0;
645 * For requests of up to four buffers, we provide the exact number
646 * of buffers and set the QM_VDQCR_EXACT flag.
647 * Without QM_VDQCR_EXACT, QMan can provide up to two more buffers
648 * than requested, so we request two less in that case.
651 vdqcr_flags = QM_VDQCR_EXACT;
652 num_rx_bufs = nb_ops;
654 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
655 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
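	/*
	 * Worked examples of the sizing rule above (a note): nb_ops = 3
	 * requests exactly 3 frames with QM_VDQCR_EXACT; nb_ops = 32
	 * requests 30 without the flag, so the up-to-2 extra frames QMan
	 * may deliver still fit the caller's limit of 32.
	 */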
657 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
662 const struct qm_fd *fd;
663 struct dpaa_sec_job *job;
664 struct dpaa_sec_op_ctx *ctx;
665 struct rte_crypto_op *op;
667 dq = qman_dequeue(fq);
672 /* sg is embedded in an op ctx,
673 * sg[0] is for output
676 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
678 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
679 ctx->fd_status = fd->status;
681 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
682 struct qm_sg_entry *sg_out;
684 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
685 op->sym->m_src : op->sym->m_dst;
687 sg_out = &job->sg[0];
688 hw_sg_to_cpu(sg_out);
689 len = sg_out->length;
691 while (mbuf->next != NULL) {
692 len -= mbuf->data_len;
695 mbuf->data_len = len;
697 if (!ctx->fd_status) {
698 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
700 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
701 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
705 /* report op status to the crypto op and then free the ctx memory */
706 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
708 qman_dqrr_consume(fq, dq);
709 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
714 static inline struct dpaa_sec_job *
715 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
717 struct rte_crypto_sym_op *sym = op->sym;
718 struct rte_mbuf *mbuf = sym->m_src;
719 struct dpaa_sec_job *cf;
720 struct dpaa_sec_op_ctx *ctx;
721 struct qm_sg_entry *sg, *out_sg, *in_sg;
722 phys_addr_t start_addr;
723 uint8_t *old_digest, extra_segs;
724 int data_len, data_offset;
726 data_len = sym->auth.data.length;
727 data_offset = sym->auth.data.offset;
729 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
730 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
731 if ((data_len & 7) || (data_offset & 7)) {
732 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
736 data_len = data_len >> 3;
737 data_offset = data_offset >> 3;
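	/*
	 * Example (a note): SNOW 3G/ZUC auth lengths and offsets arrive in
	 * bits, so a data_len of 128 bits passes the (len & 7) check and
	 * becomes 128 >> 3 = 16 bytes for the SG entries built below.
	 */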
745 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
746 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
750 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
756 old_digest = ctx->digest;
760 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
761 out_sg->length = ses->digest_length;
762 cpu_to_hw_sg(out_sg);
766 /* need to extend the input to a compound frame */
767 in_sg->extension = 1;
769 in_sg->length = data_len;
770 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
775 if (ses->iv.length) {
778 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
781 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
782 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
784 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
785 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
788 sg->length = ses->iv.length;
790 qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
791 in_sg->length += sg->length;
796 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
797 sg->offset = data_offset;
799 if (data_len <= (mbuf->data_len - data_offset)) {
800 sg->length = data_len;
802 sg->length = mbuf->data_len - data_offset;
804 /* remaining i/p segs */
805 while ((data_len = data_len - sg->length) &&
806 (mbuf = mbuf->next)) {
809 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
810 if (data_len > mbuf->data_len)
811 sg->length = mbuf->data_len;
813 sg->length = data_len;
817 if (is_decode(ses)) {
818 /* Digest verification case */
821 rte_memcpy(old_digest, sym->auth.digest.data,
823 start_addr = dpaa_mem_vtop(old_digest);
824 qm_sg_entry_set64(sg, start_addr);
825 sg->length = ses->digest_length;
826 in_sg->length += ses->digest_length;
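	/*
	 * A note on the verification path above: the received digest is
	 * copied into ctx->digest and appended to the input frame, so SEC
	 * itself compares the computed ICV against it; the driver never
	 * does a memcmp().
	 */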
837 * |<----data_len------->|
838 * |ip_header|ah_header|icv|payload|
843 static inline struct dpaa_sec_job *
844 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
846 struct rte_crypto_sym_op *sym = op->sym;
847 struct rte_mbuf *mbuf = sym->m_src;
848 struct dpaa_sec_job *cf;
849 struct dpaa_sec_op_ctx *ctx;
850 struct qm_sg_entry *sg, *in_sg;
851 rte_iova_t start_addr;
853 int data_len, data_offset;
855 data_len = sym->auth.data.length;
856 data_offset = sym->auth.data.offset;
858 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
859 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
860 if ((data_len & 7) || (data_offset & 7)) {
861 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
865 data_len = data_len >> 3;
866 data_offset = data_offset >> 3;
869 ctx = dpaa_sec_alloc_ctx(ses, 4);
875 old_digest = ctx->digest;
877 start_addr = rte_pktmbuf_iova(mbuf);
880 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
881 sg->length = ses->digest_length;
886 /* need to extend the input to a compound frame */
887 in_sg->extension = 1;
889 in_sg->length = data_len;
890 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
893 if (ses->iv.length) {
896 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
899 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
900 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
902 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
903 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
906 sg->length = ses->iv.length;
908 qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
909 in_sg->length += sg->length;
914 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
915 sg->offset = data_offset;
916 sg->length = data_len;
918 if (is_decode(ses)) {
919 /* Digest verification case */
921 /* hash result or digest; save the received digest first */
922 rte_memcpy(old_digest, sym->auth.digest.data,
924 /* let HW check the digest */
925 start_addr = dpaa_mem_vtop(old_digest);
927 qm_sg_entry_set64(sg, start_addr);
928 sg->length = ses->digest_length;
929 in_sg->length += ses->digest_length;
938 static inline struct dpaa_sec_job *
939 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
941 struct rte_crypto_sym_op *sym = op->sym;
942 struct dpaa_sec_job *cf;
943 struct dpaa_sec_op_ctx *ctx;
944 struct qm_sg_entry *sg, *out_sg, *in_sg;
945 struct rte_mbuf *mbuf;
947 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
949 int data_len, data_offset;
951 data_len = sym->cipher.data.length;
952 data_offset = sym->cipher.data.offset;
954 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
955 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
956 if ((data_len & 7) || (data_offset & 7)) {
957 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
961 data_len = data_len >> 3;
962 data_offset = data_offset >> 3;
967 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
970 req_segs = mbuf->nb_segs * 2 + 3;
972 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
973 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
978 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
987 out_sg->extension = 1;
988 out_sg->length = data_len;
989 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
990 cpu_to_hw_sg(out_sg);
994 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
995 sg->length = mbuf->data_len - data_offset;
996 sg->offset = data_offset;
998 /* Successive segs */
1003 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1004 sg->length = mbuf->data_len;
1013 in_sg->extension = 1;
1015 in_sg->length = data_len + ses->iv.length;
1018 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1019 cpu_to_hw_sg(in_sg);
1022 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1023 sg->length = ses->iv.length;
1028 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1029 sg->length = mbuf->data_len - data_offset;
1030 sg->offset = data_offset;
1032 /* Successive segs */
1037 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1038 sg->length = mbuf->data_len;
1047 static inline struct dpaa_sec_job *
1048 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1050 struct rte_crypto_sym_op *sym = op->sym;
1051 struct dpaa_sec_job *cf;
1052 struct dpaa_sec_op_ctx *ctx;
1053 struct qm_sg_entry *sg;
1054 rte_iova_t src_start_addr, dst_start_addr;
1055 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1057 int data_len, data_offset;
1059 data_len = sym->cipher.data.length;
1060 data_offset = sym->cipher.data.offset;
1062 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1063 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1064 if ((data_len & 7) || (data_offset & 7)) {
1065 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1069 data_len = data_len >> 3;
1070 data_offset = data_offset >> 3;
1073 ctx = dpaa_sec_alloc_ctx(ses, 4);
1080 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1083 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1085 dst_start_addr = src_start_addr;
1089 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1090 sg->length = data_len + ses->iv.length;
1096 /* need to extend the input to a compound frame */
1099 sg->length = data_len + ses->iv.length;
1100 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
1104 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1105 sg->length = ses->iv.length;
1109 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1110 sg->length = data_len;
1117 static inline struct dpaa_sec_job *
1118 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1120 struct rte_crypto_sym_op *sym = op->sym;
1121 struct dpaa_sec_job *cf;
1122 struct dpaa_sec_op_ctx *ctx;
1123 struct qm_sg_entry *sg, *out_sg, *in_sg;
1124 struct rte_mbuf *mbuf;
1126 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1131 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1134 req_segs = mbuf->nb_segs * 2 + 4;
1137 if (ses->auth_only_len)
1140 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1141 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1146 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1153 rte_prefetch0(cf->sg);
1156 out_sg = &cf->sg[0];
1157 out_sg->extension = 1;
1159 out_sg->length = sym->aead.data.length + ses->digest_length;
1161 out_sg->length = sym->aead.data.length;
1163 /* output sg entries */
1165 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1166 cpu_to_hw_sg(out_sg);
1169 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1170 sg->length = mbuf->data_len - sym->aead.data.offset;
1171 sg->offset = sym->aead.data.offset;
1173 /* Successive segs */
1178 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1179 sg->length = mbuf->data_len;
1182 sg->length -= ses->digest_length;
1184 if (is_encode(ses)) {
1186 /* set auth output */
1188 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1189 sg->length = ses->digest_length;
1197 in_sg->extension = 1;
1200 in_sg->length = ses->iv.length + sym->aead.data.length
1201 + ses->auth_only_len;
1203 in_sg->length = ses->iv.length + sym->aead.data.length
1204 + ses->auth_only_len + ses->digest_length;
1206 /* input sg entries */
1208 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1209 cpu_to_hw_sg(in_sg);
1212 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1213 sg->length = ses->iv.length;
1216 /* 2nd seg auth only */
1217 if (ses->auth_only_len) {
1219 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1220 sg->length = ses->auth_only_len;
1226 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1227 sg->length = mbuf->data_len - sym->aead.data.offset;
1228 sg->offset = sym->aead.data.offset;
1230 /* Successive segs */
1235 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1236 sg->length = mbuf->data_len;
1240 if (is_decode(ses)) {
1243 memcpy(ctx->digest, sym->aead.digest.data,
1244 ses->digest_length);
1245 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1246 sg->length = ses->digest_length;
1254 static inline struct dpaa_sec_job *
1255 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1257 struct rte_crypto_sym_op *sym = op->sym;
1258 struct dpaa_sec_job *cf;
1259 struct dpaa_sec_op_ctx *ctx;
1260 struct qm_sg_entry *sg;
1261 uint32_t length = 0;
1262 rte_iova_t src_start_addr, dst_start_addr;
1263 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1266 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1269 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1271 dst_start_addr = src_start_addr;
1273 ctx = dpaa_sec_alloc_ctx(ses, 7);
1281 rte_prefetch0(cf->sg);
1283 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1284 if (is_encode(ses)) {
1285 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1286 sg->length = ses->iv.length;
1287 length += sg->length;
1291 if (ses->auth_only_len) {
1292 qm_sg_entry_set64(sg,
1293 dpaa_mem_vtop(sym->aead.aad.data));
1294 sg->length = ses->auth_only_len;
1295 length += sg->length;
1299 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1300 sg->length = sym->aead.data.length;
1301 length += sg->length;
1305 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1306 sg->length = ses->iv.length;
1307 length += sg->length;
1311 if (ses->auth_only_len) {
1312 qm_sg_entry_set64(sg,
1313 dpaa_mem_vtop(sym->aead.aad.data));
1314 sg->length = ses->auth_only_len;
1315 length += sg->length;
1319 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1320 sg->length = sym->aead.data.length;
1321 length += sg->length;
1324 memcpy(ctx->digest, sym->aead.digest.data,
1325 ses->digest_length);
1328 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1329 sg->length = ses->digest_length;
1330 length += sg->length;
1334 /* input compound frame */
1335 cf->sg[1].length = length;
1336 cf->sg[1].extension = 1;
1337 cf->sg[1].final = 1;
1338 cpu_to_hw_sg(&cf->sg[1]);
1342 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1343 qm_sg_entry_set64(sg,
1344 dst_start_addr + sym->aead.data.offset);
1345 sg->length = sym->aead.data.length;
1346 length = sg->length;
1347 if (is_encode(ses)) {
1349 /* set auth output */
1351 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1352 sg->length = ses->digest_length;
1353 length += sg->length;
1358 /* output compound frame */
1359 cf->sg[0].length = length;
1360 cf->sg[0].extension = 1;
1361 cpu_to_hw_sg(&cf->sg[0]);
1366 static inline struct dpaa_sec_job *
1367 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1369 struct rte_crypto_sym_op *sym = op->sym;
1370 struct dpaa_sec_job *cf;
1371 struct dpaa_sec_op_ctx *ctx;
1372 struct qm_sg_entry *sg, *out_sg, *in_sg;
1373 struct rte_mbuf *mbuf;
1375 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1380 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1383 req_segs = mbuf->nb_segs * 2 + 4;
1386 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1387 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1392 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1399 rte_prefetch0(cf->sg);
1402 out_sg = &cf->sg[0];
1403 out_sg->extension = 1;
1405 out_sg->length = sym->auth.data.length + ses->digest_length;
1407 out_sg->length = sym->auth.data.length;
1409 /* output sg entries */
1411 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1412 cpu_to_hw_sg(out_sg);
1415 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1416 sg->length = mbuf->data_len - sym->auth.data.offset;
1417 sg->offset = sym->auth.data.offset;
1419 /* Successive segs */
1424 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1425 sg->length = mbuf->data_len;
1428 sg->length -= ses->digest_length;
1430 if (is_encode(ses)) {
1432 /* set auth output */
1434 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1435 sg->length = ses->digest_length;
1443 in_sg->extension = 1;
1446 in_sg->length = ses->iv.length + sym->auth.data.length;
1448 in_sg->length = ses->iv.length + sym->auth.data.length
1449 + ses->digest_length;
1451 /* input sg entries */
1453 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1454 cpu_to_hw_sg(in_sg);
1457 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1458 sg->length = ses->iv.length;
1463 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1464 sg->length = mbuf->data_len - sym->auth.data.offset;
1465 sg->offset = sym->auth.data.offset;
1467 /* Successive segs */
1472 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1473 sg->length = mbuf->data_len;
1477 sg->length -= ses->digest_length;
1478 if (is_decode(ses)) {
1481 memcpy(ctx->digest, sym->auth.digest.data,
1482 ses->digest_length);
1483 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1484 sg->length = ses->digest_length;
1492 static inline struct dpaa_sec_job *
1493 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1495 struct rte_crypto_sym_op *sym = op->sym;
1496 struct dpaa_sec_job *cf;
1497 struct dpaa_sec_op_ctx *ctx;
1498 struct qm_sg_entry *sg;
1499 rte_iova_t src_start_addr, dst_start_addr;
1500 uint32_t length = 0;
1501 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1504 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1506 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1508 dst_start_addr = src_start_addr;
1510 ctx = dpaa_sec_alloc_ctx(ses, 7);
1518 rte_prefetch0(cf->sg);
1520 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1521 if (is_encode(ses)) {
1522 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1523 sg->length = ses->iv.length;
1524 length += sg->length;
1528 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1529 sg->length = sym->auth.data.length;
1530 length += sg->length;
1534 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1535 sg->length = ses->iv.length;
1536 length += sg->length;
1541 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1542 sg->length = sym->auth.data.length;
1543 length += sg->length;
1546 memcpy(ctx->digest, sym->auth.digest.data,
1547 ses->digest_length);
1550 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1551 sg->length = ses->digest_length;
1552 length += sg->length;
1556 /* input compound frame */
1557 cf->sg[1].length = length;
1558 cf->sg[1].extension = 1;
1559 cf->sg[1].final = 1;
1560 cpu_to_hw_sg(&cf->sg[1]);
1564 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1565 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1566 sg->length = sym->cipher.data.length;
1567 length = sg->length;
1568 if (is_encode(ses)) {
1570 /* set auth output */
1572 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1573 sg->length = ses->digest_length;
1574 length += sg->length;
1579 /* output compound frame */
1580 cf->sg[0].length = length;
1581 cf->sg[0].extension = 1;
1582 cpu_to_hw_sg(&cf->sg[0]);
1587 #ifdef RTE_LIBRTE_SECURITY
1588 static inline struct dpaa_sec_job *
1589 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1591 struct rte_crypto_sym_op *sym = op->sym;
1592 struct dpaa_sec_job *cf;
1593 struct dpaa_sec_op_ctx *ctx;
1594 struct qm_sg_entry *sg;
1595 phys_addr_t src_start_addr, dst_start_addr;
1597 ctx = dpaa_sec_alloc_ctx(ses, 2);
1603 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1606 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1608 dst_start_addr = src_start_addr;
1612 qm_sg_entry_set64(sg, src_start_addr);
1613 sg->length = sym->m_src->pkt_len;
1617 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1620 qm_sg_entry_set64(sg, dst_start_addr);
1621 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
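	/*
	 * A note on the rationale: the output entry spans the whole
	 * writable buffer (buf_len - data_off) rather than pkt_len, since
	 * protocol offload can grow the frame, e.g. IPsec encapsulation
	 * adds the tunnel header and ESP trailer.
	 */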
1627 static inline struct dpaa_sec_job *
1628 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1630 struct rte_crypto_sym_op *sym = op->sym;
1631 struct dpaa_sec_job *cf;
1632 struct dpaa_sec_op_ctx *ctx;
1633 struct qm_sg_entry *sg, *out_sg, *in_sg;
1634 struct rte_mbuf *mbuf;
1636 uint32_t in_len = 0, out_len = 0;
1643 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1644 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1645 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1650 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1656 out_sg = &cf->sg[0];
1657 out_sg->extension = 1;
1658 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
1662 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1665 /* Successive segs */
1666 while (mbuf->next) {
1667 sg->length = mbuf->data_len;
1668 out_len += sg->length;
1672 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1675 sg->length = mbuf->buf_len - mbuf->data_off;
1676 out_len += sg->length;
1680 out_sg->length = out_len;
1681 cpu_to_hw_sg(out_sg);
1686 in_sg->extension = 1;
1688 in_len = mbuf->data_len;
1691 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1694 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1695 sg->length = mbuf->data_len;
1698 /* Successive segs */
1703 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1704 sg->length = mbuf->data_len;
1706 in_len += sg->length;
1712 in_sg->length = in_len;
1713 cpu_to_hw_sg(in_sg);
1715 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1722 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1725 /* Function to transmit the frames to the given device and queue pair */
1727 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1728 uint16_t num_tx = 0;
1729 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1730 uint32_t frames_to_send;
1731 struct rte_crypto_op *op;
1732 struct dpaa_sec_job *cf;
1733 dpaa_sec_session *ses;
1734 uint16_t auth_hdr_len, auth_tail_len;
1735 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1736 struct qman_fq *inq[DPAA_SEC_BURST];
1739 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1740 DPAA_SEC_BURST : nb_ops;
1741 for (loop = 0; loop < frames_to_send; loop++) {
1743 if (op->sym->m_src->seqn != 0) {
1744 index = op->sym->m_src->seqn - 1;
1745 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1746 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1747 flags[loop] = ((index & 0x0f) << 8);
1748 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1749 DPAA_PER_LCORE_DQRR_SIZE--;
1750 DPAA_PER_LCORE_DQRR_HELD &=
1755 switch (op->sess_type) {
1756 case RTE_CRYPTO_OP_WITH_SESSION:
1757 ses = (dpaa_sec_session *)
1758 get_sym_session_private_data(
1760 cryptodev_driver_id);
1762 #ifdef RTE_LIBRTE_SECURITY
1763 case RTE_CRYPTO_OP_SECURITY_SESSION:
1764 ses = (dpaa_sec_session *)
1765 get_sec_session_private_data(
1766 op->sym->sec_session);
1771 "sessionless crypto op not supported");
1772 frames_to_send = loop;
1778 DPAA_SEC_DP_ERR("session not available");
1779 frames_to_send = loop;
1784 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1785 if (dpaa_sec_attach_sess_q(qp, ses)) {
1786 frames_to_send = loop;
1790 } else if (unlikely(ses->qp[rte_lcore_id() %
1791 MAX_DPAA_CORES] != qp)) {
1792 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1794 ses->qp[rte_lcore_id() %
1795 MAX_DPAA_CORES], qp);
1796 frames_to_send = loop;
1801 auth_hdr_len = op->sym->auth.data.length -
1802 op->sym->cipher.data.length;
1805 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1806 ((op->sym->m_dst == NULL) ||
1807 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1808 switch (ses->ctxt) {
1809 #ifdef RTE_LIBRTE_SECURITY
1811 case DPAA_SEC_IPSEC:
1812 cf = build_proto(op, ses);
1816 cf = build_auth_only(op, ses);
1818 case DPAA_SEC_CIPHER:
1819 cf = build_cipher_only(op, ses);
1822 cf = build_cipher_auth_gcm(op, ses);
1823 auth_hdr_len = ses->auth_only_len;
1825 case DPAA_SEC_CIPHER_HASH:
1827 op->sym->cipher.data.offset
1828 - op->sym->auth.data.offset;
1830 op->sym->auth.data.length
1831 - op->sym->cipher.data.length
1833 cf = build_cipher_auth(op, ses);
1836 DPAA_SEC_DP_ERR("unsupported op");
1837 frames_to_send = loop;
1842 switch (ses->ctxt) {
1843 #ifdef RTE_LIBRTE_SECURITY
1845 case DPAA_SEC_IPSEC:
1846 cf = build_proto_sg(op, ses);
1850 cf = build_auth_only_sg(op, ses);
1852 case DPAA_SEC_CIPHER:
1853 cf = build_cipher_only_sg(op, ses);
1856 cf = build_cipher_auth_gcm_sg(op, ses);
1857 auth_hdr_len = ses->auth_only_len;
1859 case DPAA_SEC_CIPHER_HASH:
1861 op->sym->cipher.data.offset
1862 - op->sym->auth.data.offset;
1864 op->sym->auth.data.length
1865 - op->sym->cipher.data.length
1867 cf = build_cipher_auth_sg(op, ses);
1870 DPAA_SEC_DP_ERR("unsupported op");
1871 frames_to_send = loop;
1876 if (unlikely(!cf)) {
1877 frames_to_send = loop;
1883 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1884 fd->opaque_addr = 0;
1886 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1887 fd->_format1 = qm_fd_compound;
1888 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1890 /* Auth_only_len is set as 0 in the descriptor and is
1891 * overwritten per packet here via fd.cmd.
1894 if (auth_hdr_len || auth_tail_len) {
1895 fd->cmd = 0x80000000;
1897 ((auth_tail_len << 16) | auth_hdr_len);
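		/*
		 * A note, to our understanding of the SEC DPOVRD word that
		 * fd.cmd feeds: bit 31 enables the override, bits 15..0
		 * carry auth_hdr_len and bits 30..16 auth_tail_len,
		 * matching the shifts above.
		 */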
1900 #ifdef RTE_LIBRTE_SECURITY
1901 /* In case of PDCP, the per-packet HFN is stored in the
1902 * mbuf private area after sym_op.
1904 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1905 fd->cmd = 0x80000000 |
1906 *((uint32_t *)((uint8_t *)op +
1907 ses->pdcp.hfn_ovd_offset));
1908 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1909 *((uint32_t *)((uint8_t *)op +
1910 ses->pdcp.hfn_ovd_offset)),
1917 while (loop < frames_to_send) {
1918 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1919 &flags[loop], frames_to_send - loop);
1921 nb_ops -= frames_to_send;
1922 num_tx += frames_to_send;
1925 dpaa_qp->tx_pkts += num_tx;
1926 dpaa_qp->tx_errs += nb_ops - num_tx;
1932 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1936 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1938 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1940 dpaa_qp->rx_pkts += num_rx;
1941 dpaa_qp->rx_errs += nb_ops - num_rx;
1943 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1948 /** Release queue pair */
1950 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1953 struct dpaa_sec_dev_private *internals;
1954 struct dpaa_sec_qp *qp = NULL;
1956 PMD_INIT_FUNC_TRACE();
1958 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1960 internals = dev->data->dev_private;
1961 if (qp_id >= internals->max_nb_queue_pairs) {
1962 DPAA_SEC_ERR("Max supported qpid %d",
1963 internals->max_nb_queue_pairs);
1967 qp = &internals->qps[qp_id];
1968 rte_mempool_free(qp->ctx_pool);
1969 qp->internals = NULL;
1970 dev->data->queue_pairs[qp_id] = NULL;
1975 /** Setup a queue pair */
1977 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1978 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1979 __rte_unused int socket_id)
1981 struct dpaa_sec_dev_private *internals;
1982 struct dpaa_sec_qp *qp = NULL;
1985 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1987 internals = dev->data->dev_private;
1988 if (qp_id >= internals->max_nb_queue_pairs) {
1989 DPAA_SEC_ERR("Max supported qpid %d",
1990 internals->max_nb_queue_pairs);
1994 qp = &internals->qps[qp_id];
1995 qp->internals = internals;
1996 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1997 dev->data->dev_id, qp_id);
1998 if (!qp->ctx_pool) {
1999 qp->ctx_pool = rte_mempool_create((const char *)str,
2002 CTX_POOL_CACHE_SIZE, 0,
2003 NULL, NULL, NULL, NULL,
2005 if (!qp->ctx_pool) {
2006 DPAA_SEC_ERR("%s create failed\n", str);
2010 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2011 dev->data->dev_id, qp_id);
2012 dev->data->queue_pairs[qp_id] = qp;
2017 /** Return the number of allocated queue pairs */
2019 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
2021 PMD_INIT_FUNC_TRACE();
2023 return dev->data->nb_queue_pairs;
2026 /** Return the size of the session structure */
2028 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2030 PMD_INIT_FUNC_TRACE();
2032 return sizeof(dpaa_sec_session);
2036 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2037 struct rte_crypto_sym_xform *xform,
2038 dpaa_sec_session *session)
2040 session->ctxt = DPAA_SEC_CIPHER;
2041 session->cipher_alg = xform->cipher.algo;
2042 session->iv.length = xform->cipher.iv.length;
2043 session->iv.offset = xform->cipher.iv.offset;
2044 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2045 RTE_CACHE_LINE_SIZE);
2046 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2047 DPAA_SEC_ERR("No Memory for cipher key");
2050 session->cipher_key.length = xform->cipher.key.length;
2052 memcpy(session->cipher_key.data, xform->cipher.key.data,
2053 xform->cipher.key.length);
2054 switch (xform->cipher.algo) {
2055 case RTE_CRYPTO_CIPHER_AES_CBC:
2056 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2057 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2059 case RTE_CRYPTO_CIPHER_3DES_CBC:
2060 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2061 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2063 case RTE_CRYPTO_CIPHER_AES_CTR:
2064 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2065 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2067 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2068 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2070 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2071 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2074 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2075 xform->cipher.algo);
2076 rte_free(session->cipher_key.data);
2079 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2086 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2087 struct rte_crypto_sym_xform *xform,
2088 dpaa_sec_session *session)
2090 session->ctxt = DPAA_SEC_AUTH;
2091 session->auth_alg = xform->auth.algo;
2092 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2093 RTE_CACHE_LINE_SIZE);
2094 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2095 DPAA_SEC_ERR("No Memory for auth key");
2098 session->auth_key.length = xform->auth.key.length;
2099 session->digest_length = xform->auth.digest_length;
2100 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2101 session->iv.offset = xform->auth.iv.offset;
2102 session->iv.length = xform->auth.iv.length;
2105 memcpy(session->auth_key.data, xform->auth.key.data,
2106 xform->auth.key.length);
2108 switch (xform->auth.algo) {
2109 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2110 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2111 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2113 case RTE_CRYPTO_AUTH_MD5_HMAC:
2114 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2115 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2117 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2118 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2119 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2121 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2122 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2123 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2125 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2126 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2127 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2129 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2130 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2131 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2133 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2134 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2135 session->auth_key.algmode = OP_ALG_AAI_F9;
2137 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2138 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2139 session->auth_key.algmode = OP_ALG_AAI_F9;
2142 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2144 rte_free(session->auth_key.data);
2148 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2155 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2156 struct rte_crypto_sym_xform *xform,
2157 dpaa_sec_session *session)
2160 struct rte_crypto_cipher_xform *cipher_xform;
2161 struct rte_crypto_auth_xform *auth_xform;
2163 session->ctxt = DPAA_SEC_CIPHER_HASH;
2164 if (session->auth_cipher_text) {
2165 cipher_xform = &xform->cipher;
2166 auth_xform = &xform->next->auth;
2168 cipher_xform = &xform->next->cipher;
2169 auth_xform = &xform->auth;
2172 /* Set IV parameters */
2173 session->iv.offset = cipher_xform->iv.offset;
2174 session->iv.length = cipher_xform->iv.length;
2176 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2177 RTE_CACHE_LINE_SIZE);
2178 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2179 DPAA_SEC_ERR("No Memory for cipher key");
2182 session->cipher_key.length = cipher_xform->key.length;
2183 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2184 RTE_CACHE_LINE_SIZE);
2185 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2186 DPAA_SEC_ERR("No Memory for auth key");
2187 rte_free(session->cipher_key.data);
2190 session->auth_key.length = auth_xform->key.length;
2191 memcpy(session->cipher_key.data, cipher_xform->key.data,
2192 cipher_xform->key.length);
2193 memcpy(session->auth_key.data, auth_xform->key.data,
2194 auth_xform->key.length);
2196 session->digest_length = auth_xform->digest_length;
2197 session->auth_alg = auth_xform->algo;
2199 switch (auth_xform->algo) {
2200 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2201 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2202 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2204 case RTE_CRYPTO_AUTH_MD5_HMAC:
2205 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2206 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2208 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2209 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2210 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2212 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2213 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2214 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2216 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2217 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2218 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2220 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2221 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2222 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2225 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2230 session->cipher_alg = cipher_xform->algo;
2232 switch (cipher_xform->algo) {
2233 case RTE_CRYPTO_CIPHER_AES_CBC:
2234 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2235 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2237 case RTE_CRYPTO_CIPHER_3DES_CBC:
2238 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2239 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2241 case RTE_CRYPTO_CIPHER_AES_CTR:
2242 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2243 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2246 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2247 cipher_xform->algo);
2250 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2255 rte_free(session->cipher_key.data);
2256 rte_free(session->auth_key.data);
2261 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2262 struct rte_crypto_sym_xform *xform,
2263 dpaa_sec_session *session)
2265 session->aead_alg = xform->aead.algo;
2266 session->ctxt = DPAA_SEC_AEAD;
2267 session->iv.length = xform->aead.iv.length;
2268 session->iv.offset = xform->aead.iv.offset;
2269 session->auth_only_len = xform->aead.aad_length;
2270 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2271 RTE_CACHE_LINE_SIZE);
2272 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2273 DPAA_SEC_ERR("No Memory for aead key\n");
2276 session->aead_key.length = xform->aead.key.length;
2277 session->digest_length = xform->aead.digest_length;
2279 memcpy(session->aead_key.data, xform->aead.key.data,
2280 xform->aead.key.length);
2282 switch (session->aead_alg) {
2283 case RTE_CRYPTO_AEAD_AES_GCM:
2284 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2285 session->aead_key.algmode = OP_ALG_AAI_GCM;
2288 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2289 rte_free(session->aead_key.data);
2293 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2299 static struct qman_fq *
2300 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2304 for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2305 if (qi->inq_attach[i] == 0) {
2306 qi->inq_attach[i] = 1;
2310 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2316 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2320 for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
2321 if (&qi->inq[i] == fq) {
2322 qman_retire_fq(fq, NULL);
2324 qi->inq_attach[i] = 0;
2332 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2336 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2337 ret = dpaa_sec_prep_cdb(sess);
2339 DPAA_SEC_ERR("Unable to prepare sec cdb");
2342 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2343 ret = rte_dpaa_portal_init((void *)0);
2345 DPAA_SEC_ERR("Failure in affining portal");
2349 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2350 dpaa_mem_vtop(&sess->cdb),
2351 qman_fq_fqid(&qp->outq));
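	/*
	 * A note: this programs the per-session RX FQ with context_a
	 * pointing at the IOVA of the session's CDB and context_b at the
	 * qp's output FQID, i.e. the wiring done in dpaa_sec_init_rx()
	 * earlier in this file.
	 */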
2353 DPAA_SEC_ERR("Unable to init sec queue");
2359 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2360 struct rte_crypto_sym_xform *xform, void *sess)
2362 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2363 dpaa_sec_session *session = sess;
2367 PMD_INIT_FUNC_TRACE();
2369 if (unlikely(sess == NULL)) {
2370 DPAA_SEC_ERR("invalid session struct");
2373 memset(session, 0, sizeof(dpaa_sec_session));
2375 /* Default IV length = 0 */
2376 session->iv.length = 0;
2379 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2380 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2381 ret = dpaa_sec_cipher_init(dev, xform, session);
2383 /* Authentication Only */
2384 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2385 xform->next == NULL) {
2386 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2387 session->ctxt = DPAA_SEC_AUTH;
2388 ret = dpaa_sec_auth_init(dev, xform, session);
2390 /* Cipher then Authenticate */
2391 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2392 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2393 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2394 session->auth_cipher_text = 1;
2395 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2396 ret = dpaa_sec_auth_init(dev, xform, session);
2397 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2398 ret = dpaa_sec_cipher_init(dev, xform, session);
2400 ret = dpaa_sec_chain_init(dev, xform, session);
2402 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2405 /* Authenticate then Cipher */
2406 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2407 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2408 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2409 session->auth_cipher_text = 0;
2410 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2411 ret = dpaa_sec_cipher_init(dev, xform, session);
2412 else if (xform->next->cipher.algo
2413 == RTE_CRYPTO_CIPHER_NULL)
2414 ret = dpaa_sec_auth_init(dev, xform, session);
2416 ret = dpaa_sec_chain_init(dev, xform, session);
2418 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2422 /* AEAD operation for AES-GCM kind of Algorithms */
2423 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2424 xform->next == NULL) {
2425 ret = dpaa_sec_aead_init(dev, xform, session);
2428 DPAA_SEC_ERR("Invalid crypto type");
2432 DPAA_SEC_ERR("unable to init session");
2436 rte_spinlock_lock(&internals->lock);
2437 for (i = 0; i < MAX_DPAA_CORES; i++) {
2438 session->inq[i] = dpaa_sec_attach_rxq(internals);
2439 if (session->inq[i] == NULL) {
2440 DPAA_SEC_ERR("unable to attach sec queue");
2441 rte_spinlock_unlock(&internals->lock);
2445 rte_spinlock_unlock(&internals->lock);
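	/*
	 * A note on the loop above: one RX FQ is attached per possible
	 * lcore (MAX_DPAA_CORES), matching the lockless
	 * ses->inq[rte_lcore_id() % MAX_DPAA_CORES] indexing used on the
	 * datapath.
	 */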
2450 rte_free(session->cipher_key.data);
2451 rte_free(session->auth_key.data);
2452 memset(session, 0, sizeof(dpaa_sec_session));
2458 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2459 struct rte_crypto_sym_xform *xform,
2460 struct rte_cryptodev_sym_session *sess,
2461 struct rte_mempool *mempool)
2463 void *sess_private_data;
2466 PMD_INIT_FUNC_TRACE();
2468 if (rte_mempool_get(mempool, &sess_private_data)) {
2469 DPAA_SEC_ERR("Couldn't get object from session mempool");
2473 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2475 DPAA_SEC_ERR("failed to configure session parameters");
2477 /* Return session to mempool */
2478 rte_mempool_put(mempool, sess_private_data);
2482 set_sym_session_private_data(sess, dev->driver_id,
2490 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2492 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2493 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2496 for (i = 0; i < MAX_DPAA_CORES; i++) {
2498 dpaa_sec_detach_rxq(qi, s->inq[i]);
2502 rte_free(s->cipher_key.data);
2503 rte_free(s->auth_key.data);
2504 memset(s, 0, sizeof(dpaa_sec_session));
2505 rte_mempool_put(sess_mp, (void *)s);
2508 /** Clear the session memory so it doesn't leave key material behind */
2510 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2511 struct rte_cryptodev_sym_session *sess)
2513 PMD_INIT_FUNC_TRACE();
2514 uint8_t index = dev->driver_id;
2515 void *sess_priv = get_sym_session_private_data(sess, index);
2516 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2519 free_session_memory(dev, s);
2520 set_sym_session_private_data(sess, index, NULL);
2524 #ifdef RTE_LIBRTE_SECURITY
2526 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2527 struct rte_security_session_conf *conf,
2530 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2531 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2532 struct rte_crypto_auth_xform *auth_xform = NULL;
2533 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2534 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2537 PMD_INIT_FUNC_TRACE();
2539 memset(session, 0, sizeof(dpaa_sec_session));
2540 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2541 cipher_xform = &conf->crypto_xform->cipher;
2542 if (conf->crypto_xform->next)
2543 auth_xform = &conf->crypto_xform->next->auth;
2545 auth_xform = &conf->crypto_xform->auth;
2546 if (conf->crypto_xform->next)
2547 cipher_xform = &conf->crypto_xform->next->cipher;
2549 session->proto_alg = conf->protocol;
2550 session->ctxt = DPAA_SEC_IPSEC;
2552 if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2553 session->cipher_key.data = rte_zmalloc(NULL,
2554 cipher_xform->key.length,
2555 RTE_CACHE_LINE_SIZE);
2556 if (session->cipher_key.data == NULL &&
2557 cipher_xform->key.length > 0) {
2558 DPAA_SEC_ERR("No Memory for cipher key");
2561 memcpy(session->cipher_key.data, cipher_xform->key.data,
2562 cipher_xform->key.length);
2563 session->cipher_key.length = cipher_xform->key.length;
2565 switch (cipher_xform->algo) {
2566 case RTE_CRYPTO_CIPHER_NULL:
2567 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2569 case RTE_CRYPTO_CIPHER_AES_CBC:
2570 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2571 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2573 case RTE_CRYPTO_CIPHER_3DES_CBC:
2574 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2575 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2577 case RTE_CRYPTO_CIPHER_AES_CTR:
2578 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2579 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2582 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2583 cipher_xform->algo);
2586 session->cipher_alg = cipher_xform->algo;
2588 session->cipher_key.data = NULL;
2589 session->cipher_key.length = 0;
2590 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
			session->digest_length = 0;
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			goto out;
		}
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
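
	/* Build the SEC protocol data block (PDB): egress gets an
	 * encapsulation PDB with the prebuilt outer IP header inlined
	 * behind it, while ingress only needs the outer header length
	 * encoded into the decap options word.
	 */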
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
	} else
		goto out;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
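
/* A minimal usage sketch for the lookaside-protocol path above,
 * assuming an application-defined IPsec xform "app_ipsec", a crypto
 * xform chain "app_xform" and a session mempool "sess_pool" (all
 * hypothetical names):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = app_ipsec,
 *		.crypto_xform = &app_xform,
 *	};
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_pool);
 *
 * which lands in dpaa_sec_security_session_create() further below.
 */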
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;
	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				session->cipher_alg);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}
	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				session->auth_alg);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}

		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
			auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
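
	/* Note: the HFN override offset below reuses the cipher xform IV
	 * offset; that is where the per-operation HFN value is expected
	 * to sit when hfn_ovrd is enabled.
	 */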
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
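
/* Dispatch an rte_security session request to the protocol specific
 * setup routine above; MACsec is part of the protocol enum but is not
 * implemented by this PMD.
 */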
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENOMEM;
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
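
/* Event-mode completion callbacks: instead of the application polling
 * dequeue_burst(), the QMAN portal hands finished SEC jobs straight to
 * the eventdev. The parallel variant consumes the DQRR entry right
 * away; the atomic variant defers it so the flow stays held active
 * until the application releases the event.
 */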
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	uint8_t index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
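
/* A minimal sketch of wiring a queue pair into the eventdev, assuming
 * an application-chosen "qp_id", a QMAN channel id "ch_id" obtained for
 * the target event port, and a populated "ev" (hypothetical names):
 *
 *	if (dpaa_sec_eventq_attach(cryptodev, qp_id, ch_id, &ev) == 0) {
 *		... process completions as events ...
 *		dpaa_sec_eventq_detach(cryptodev, qp_id);
 *	}
 */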
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
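
/* rte_security plumbing: only session create/destroy and the capability
 * query are implemented; session update, stats and inline packet
 * metadata hooks are left NULL.
 */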
#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
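
/* One-time device bring-up: publish ops and feature flags, then, in the
 * primary process only, allocate the rte_security context and create
 * the QMAN frame queues that carry jobs to and from the SEC block.
 */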
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era", NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}