/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 *
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called in
	 * steps of four to clear all the SG entries. dpaa_sec_alloc_ctx()
	 * is called for each packet, and memset() is costlier than
	 * dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
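
/*
 * Note on the two translators above (informal, based on how the dpaax
 * helpers are used here): dpaa_mem_vtop() resolves a virtual address
 * through the memseg list and opportunistically feeds the dpaax IOVA
 * table, so that the hot-path reverse lookup in dpaa_mem_ptov() can be
 * a plain table read via dpaax_iova_table_get_va() and only falls back
 * to the slower rte_mem_iova2virt() walk on a miss.
 */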
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with the CAAM channel as destination so that
 * all packets in this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
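
/*
 * The callback above follows the QMan DQRR contract: returning
 * qman_cb_dqrr_defer leaves the entry on the ring when the per-thread
 * burst buffer (dpaa_sec_ops, DPAA_SEC_BURST deep) is already full,
 * while qman_cb_dqrr_consume releases it once the op has been harvested.
 */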
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}
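
/*
 * A note on the rta_inline_query() result used above (as this driver
 * interprets it): bit 0 of cdb->sh_desc[2] set means the cipher key
 * still fits inline in the shared descriptor, bit 1 the same for the
 * auth key; a cleared bit forces the key to be referenced by pointer
 * (RTA_DATA_PTR), hence the dpaa_mem_vtop() conversion of the key
 * address in that case.
 */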
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	authdata.algtype = ses->auth_key.alg;
	authdata.algmode = ses->auth_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					NULL,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
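
/*
 * The CDB header words written above are converted with
 * rte_cpu_to_be_32() because SEC parses the shared descriptor header in
 * big-endian format regardless of the host byte order; idlen is the
 * shared descriptor length returned by the cnstr_shdsc_* constructor.
 */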
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
						op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
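
/*
 * Worked example of the VDQCR sizing logic above: for nb_ops = 3 the
 * driver asks for exactly 3 frames with QM_VDQCR_EXACT; for nb_ops = 32
 * it asks for 30 without the flag, since an inexact volatile dequeue
 * may return up to two frames more than requested, keeping the result
 * within the caller's ops[] array.
 */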
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *		|ip_header|ah_header|icv|payload|
 *		^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
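
/*
 * All build_* helpers in this file produce the same top-level layout
 * (a sketch, matching how dqrr_out_fq_cb_rx() and dpaa_sec_deq() parse
 * the result): cf->sg[0] is the output side of the compound frame and
 * cf->sg[1] the input side; when a side needs more than one buffer the
 * entry is marked extension = 1 and points at a scatter-gather table
 * starting at cf->sg[2], whose last entry carries final = 1.
 */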
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (is_decode(ses))
		sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
							+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
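
/*
 * For GCM the AAD (ses->auth_only_len bytes at sym->aead.aad.data) is
 * part of the input frame built above but not of the output, which is
 * why the input and output compound-frame lengths differ; the ICV is
 * appended on encrypt and supplied (via ctx->digest) for hardware
 * verification on decrypt.
 */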
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
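
/*
 * Usage sketch (illustrative only, standard cryptodev API, not part of
 * this driver): ops prepared by an application reach the function above
 * through the burst API, e.g.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *
 * anything not accepted (sent < nb_ops) remains owned by the caller and
 * is typically retried.
 */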
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
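
/*
 * The per-qp ctx_pool allocated above backs dpaa_sec_alloc_ctx(): every
 * in-flight op borrows one dpaa_sec_op_ctx (job descriptor, SG table
 * and digest scratch) from it and returns it in dpaa_sec_op_ending() or
 * dpaa_sec_deq(), so the pool depth bounds the number of outstanding
 * ops per queue pair.
 */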
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		rte_free(session->cipher_key.data);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		rte_free(session->auth_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		goto error_out;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -EINVAL;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		rte_free(session->aead_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		session->ctxt = DPAA_SEC_CIPHER;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->ctxt = DPAA_SEC_CIPHER_HASH;
			session->auth_cipher_text = 1;
			dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->ctxt = DPAA_SEC_CIPHER_HASH;
			session->auth_cipher_text = 0;
			dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
        struct rte_security_session_conf *conf,
        void *sess)
{
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;
    struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;
    dpaa_sec_session *session = (dpaa_sec_session *)sess;
    uint32_t i;

    PMD_INIT_FUNC_TRACE();

    memset(session, 0, sizeof(dpaa_sec_session));
    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        cipher_xform = &conf->crypto_xform->cipher;
        if (conf->crypto_xform->next)
            auth_xform = &conf->crypto_xform->next->auth;
    } else {
        auth_xform = &conf->crypto_xform->auth;
        if (conf->crypto_xform->next)
            cipher_xform = &conf->crypto_xform->next->cipher;
    }
    session->proto_alg = conf->protocol;
    session->ctxt = DPAA_SEC_IPSEC;

    if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
        session->cipher_key.data = rte_zmalloc(NULL,
                    cipher_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL &&
                cipher_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for cipher key");
            return -ENOMEM;
        }
        memcpy(session->cipher_key.data, cipher_xform->key.data,
                cipher_xform->key.length);
        session->cipher_key.length = cipher_xform->key.length;

        switch (cipher_xform->algo) {
        case RTE_CRYPTO_CIPHER_NULL:
            session->cipher_key.alg = OP_PCL_IPSEC_NULL;
            break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
            session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
            session->cipher_key.algmode = OP_ALG_AAI_CBC;
            break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
            session->cipher_key.alg = OP_PCL_IPSEC_3DES;
            session->cipher_key.algmode = OP_ALG_AAI_CBC;
            break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
            session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
            session->cipher_key.algmode = OP_ALG_AAI_CTR;
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                    cipher_xform->algo);
            goto out;
        }
        session->cipher_alg = cipher_xform->algo;
    } else {
        session->cipher_key.data = NULL;
        session->cipher_key.length = 0;
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
    }

    if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
        session->auth_key.data = rte_zmalloc(NULL,
                    auth_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL &&
                auth_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for auth key");
            rte_free(session->cipher_key.data);
            return -ENOMEM;
        }
        memcpy(session->auth_key.data, auth_xform->key.data,
                auth_xform->key.length);
        session->auth_key.length = auth_xform->key.length;

        switch (auth_xform->algo) {
        case RTE_CRYPTO_AUTH_NULL:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
            session->digest_length = 0;
            break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
            session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
            session->auth_key.algmode = OP_ALG_AAI_HMAC;
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                    auth_xform->algo);
            goto out;
        }
        session->auth_alg = auth_xform->algo;
    } else {
        session->auth_key.data = NULL;
        session->auth_key.length = 0;
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
    }

    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_v = IPVERSION;
            session->ip4_hdr.ip_hl = 5;
            session->ip4_hdr.ip_len = rte_cpu_to_be_16(
                        sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
            session->ip4_hdr.ip_id = 0;
            session->ip4_hdr.ip_off = 0;
            session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
            session->ip4_hdr.ip_p = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            session->ip4_hdr.ip_sum = 0;
            session->ip4_hdr.ip_src =
                    ipsec_xform->tunnel.ipv4.src_ip;
            session->ip4_hdr.ip_dst =
                    ipsec_xform->tunnel.ipv4.dst_ip;
            session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
                        (void *)&session->ip4_hdr,
                        sizeof(struct ip));
            session->encap_pdb.ip_hdr_len = sizeof(struct ip);
        } else if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip6_hdr));
            session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
                DPAA_IPv6_DEFAULT_VTC_FLOW |
                ((ipsec_xform->tunnel.ipv6.dscp <<
                    RTE_IPV6_HDR_TC_SHIFT) &
                    RTE_IPV6_HDR_TC_MASK) |
                ((ipsec_xform->tunnel.ipv6.flabel <<
                    RTE_IPV6_HDR_FL_SHIFT) &
                    RTE_IPV6_HDR_FL_MASK));
            /* Payload length will be updated by HW */
            session->ip6_hdr.payload_len = 0;
            session->ip6_hdr.hop_limits =
                    ipsec_xform->tunnel.ipv6.hlimit;
            session->ip6_hdr.proto = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            memcpy(&session->ip6_hdr.src_addr,
                    &ipsec_xform->tunnel.ipv6.src_addr, 16);
            memcpy(&session->ip6_hdr.dst_addr,
                    &ipsec_xform->tunnel.ipv6.dst_addr, 16);
            session->encap_pdb.ip_hdr_len =
                    sizeof(struct rte_ipv6_hdr);
        }
        session->encap_pdb.options =
            (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
            PDBOPTS_ESP_OIHI_PDB_INL |
            PDBOPTS_ESP_IVSRC |
            PDBHMO_ESP_ENCAP_DTTL |
            PDBHMO_ESP_SNR;
        if (ipsec_xform->options.esn)
            session->encap_pdb.options |= PDBOPTS_ESP_ESN;
        session->encap_pdb.spi = ipsec_xform->spi;
        session->dir = DIR_ENC;
    } else if (ipsec_xform->direction ==
            RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
        memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
        if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
            session->decap_pdb.options = sizeof(struct ip) << 16;
        else
            session->decap_pdb.options =
                    sizeof(struct rte_ipv6_hdr) << 16;
        if (ipsec_xform->options.esn)
            session->decap_pdb.options |= PDBOPTS_ESP_ESN;
        session->dir = DIR_DEC;
    } else {
        goto out;
    }
    rte_spinlock_lock(&internals->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(internals);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&internals->lock);
            goto out;
        }
    }
    rte_spinlock_unlock(&internals->lock);

    return 0;
out:
    rte_free(session->auth_key.data);
    rte_free(session->cipher_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));
    return -1;
}

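/*
 * Configuration sketch (illustrative only, values are hypothetical): the
 * kind of rte_security_session_conf an application might pass down to
 * dpaa_sec_set_ipsec_session() for an egress ESP tunnel SA. "cipher_xform"
 * is an application-side AES-CBC transform with the HMAC auth transform
 * chained through its "next" pointer, as the egress branch above expects.
 *
 *    struct rte_security_session_conf conf = {
 *        .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *        .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *        .ipsec = {
 *            .spi = 0x1000,
 *            .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *            .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *            .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *            .tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *        },
 *        .crypto_xform = &cipher_xform,
 *    };
 */
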
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
        struct rte_security_session_conf *conf,
        void *sess)
{
    struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
    struct rte_crypto_sym_xform *xform = conf->crypto_xform;
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;
    dpaa_sec_session *session = (dpaa_sec_session *)sess;
    struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
    uint32_t i;

    PMD_INIT_FUNC_TRACE();

    memset(session, 0, sizeof(dpaa_sec_session));

    /* find xfrm types */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        cipher_xform = &xform->cipher;
        if (xform->next != NULL)
            auth_xform = &xform->next->auth;
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        auth_xform = &xform->auth;
        if (xform->next != NULL)
            cipher_xform = &xform->next->cipher;
    } else {
        DPAA_SEC_ERR("Invalid crypto type");
        return -EINVAL;
    }

    session->proto_alg = conf->protocol;
    session->ctxt = DPAA_SEC_PDCP;

    if (cipher_xform) {
        switch (cipher_xform->algo) {
        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
            session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
            break;
        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
            session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
            break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
            session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
            break;
        case RTE_CRYPTO_CIPHER_NULL:
            session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
            break;
        default:
            DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
                    session->cipher_alg);
            goto out;
        }

        session->cipher_key.data = rte_zmalloc(NULL,
                    cipher_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL &&
                cipher_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for cipher key");
            return -ENOMEM;
        }
        session->cipher_key.length = cipher_xform->key.length;
        memcpy(session->cipher_key.data, cipher_xform->key.data,
                cipher_xform->key.length);
        session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                    DIR_ENC : DIR_DEC;
        session->cipher_alg = cipher_xform->algo;
    } else {
        session->cipher_key.data = NULL;
        session->cipher_key.length = 0;
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
        session->dir = DIR_ENC;
    }

    if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
        if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
            pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
            DPAA_SEC_ERR(
                "PDCP Seq Num size should be 5/12 bits for cmode");
            goto out;
        }
    }

    if (auth_xform) {
        switch (auth_xform->algo) {
        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
            session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
            break;
        case RTE_CRYPTO_AUTH_ZUC_EIA3:
            session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
            break;
        case RTE_CRYPTO_AUTH_AES_CMAC:
            session->auth_key.alg = PDCP_AUTH_TYPE_AES;
            break;
        case RTE_CRYPTO_AUTH_NULL:
            session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
            break;
        default:
            DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                    session->auth_alg);
            rte_free(session->cipher_key.data);
            goto out;
        }
        session->auth_key.data = rte_zmalloc(NULL,
                    auth_xform->key.length,
                    RTE_CACHE_LINE_SIZE);
        if (!session->auth_key.data &&
                auth_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for auth key");
            rte_free(session->cipher_key.data);
            return -ENOMEM;
        }
        session->auth_key.length = auth_xform->key.length;
        memcpy(session->auth_key.data, auth_xform->key.data,
                auth_xform->key.length);
        session->auth_alg = auth_xform->algo;
    } else {
        session->auth_key.data = NULL;
        session->auth_key.length = 0;
        session->auth_alg = 0;
    }
    session->pdcp.domain = pdcp_xform->domain;
    session->pdcp.bearer = pdcp_xform->bearer;
    session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
    session->pdcp.sn_size = pdcp_xform->sn_size;
    session->pdcp.hfn = pdcp_xform->hfn;
    session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
    session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
    session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

    rte_spinlock_lock(&dev_priv->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&dev_priv->lock);
            goto out;
        }
    }
    rte_spinlock_unlock(&dev_priv->lock);
    return 0;
out:
    rte_free(session->auth_key.data);
    rte_free(session->cipher_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));
    return -1;
}

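/*
 * Configuration sketch (illustrative only, values are hypothetical): a
 * PDCP data-plane session with SNOW 3G ciphering, as it would arrive at
 * dpaa_sec_set_pdcp_session(). "cipher_xform" is an application-side
 * SNOW3G_UEA2 cipher transform.
 *
 *    struct rte_security_session_conf conf = {
 *        .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *        .protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *        .pdcp = {
 *            .bearer = 0x16,
 *            .domain = RTE_SECURITY_PDCP_MODE_DATA,
 *            .pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *            .sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *            .hfn = 0x1,
 *            .hfn_threshold = 0xfffff,
 *        },
 *        .crypto_xform = &cipher_xform,
 *    };
 */
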
static int
dpaa_sec_security_session_create(void *dev,
        struct rte_security_session_conf *conf,
        struct rte_security_session *sess,
        struct rte_mempool *mempool)
{
    void *sess_private_data;
    struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
    int ret;

    if (rte_mempool_get(mempool, &sess_private_data)) {
        DPAA_SEC_ERR("Couldn't get object from session mempool");
        return -ENOMEM;
    }

    switch (conf->protocol) {
    case RTE_SECURITY_PROTOCOL_IPSEC:
        ret = dpaa_sec_set_ipsec_session(cdev, conf,
                sess_private_data);
        break;
    case RTE_SECURITY_PROTOCOL_PDCP:
        ret = dpaa_sec_set_pdcp_session(cdev, conf,
                sess_private_data);
        break;
    case RTE_SECURITY_PROTOCOL_MACSEC:
        return -ENOTSUP;
    default:
        return -EINVAL;
    }
    if (ret != 0) {
        DPAA_SEC_ERR("failed to configure session parameters");
        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);
        return ret;
    }

    set_sec_session_private_data(sess, sess_private_data);

    return ret;
}

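/*
 * Usage sketch (illustrative only): applications reach the create/destroy
 * hooks above through the rte_security library rather than calling them
 * directly. "dev_id", "conf" and "sess_mempool" are assumed to exist in
 * application code.
 *
 *    struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *            rte_cryptodev_get_sec_ctx(dev_id);
 *    struct rte_security_session *sec_sess =
 *            rte_security_session_create(ctx, &conf, sess_mempool);
 *
 *    rte_security_session_destroy(ctx, sec_sess);
 */
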
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
        struct rte_security_session *sess)
{
    PMD_INIT_FUNC_TRACE();

    void *sess_priv = get_sec_session_private_data(sess);
    dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

    if (sess_priv) {
        free_session_memory((struct rte_cryptodev *)dev, s);
        set_sec_session_private_data(sess, NULL);
    }
    return 0;
}
#endif

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
        struct rte_cryptodev_config *config __rte_unused)
{
    PMD_INIT_FUNC_TRACE();

    return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
    PMD_INIT_FUNC_TRACE();
    return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
    PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
    PMD_INIT_FUNC_TRACE();

    if (dev == NULL)
        return -ENOMEM;

    return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_info *info)
{
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();
    if (info != NULL) {
        info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
        info->feature_flags = dev->feature_flags;
        info->capabilities = dpaa_sec_capabilities;
        info->sym.max_nb_sessions = internals->max_nb_sessions;
        info->driver_id = cryptodev_driver_id;
    }
}

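/*
 * Usage sketch (illustrative only): dpaa_sec_dev_infos_get() services the
 * standard device query below; "dev_id" is assumed valid. After the call,
 * info.max_nb_queue_pairs holds RTE_DPAA_MAX_NB_SEC_QPS and
 * info.sym.max_nb_sessions holds RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS.
 *
 *    struct rte_cryptodev_info info;
 *
 *    rte_cryptodev_info_get(dev_id, &info);
 */
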
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
        struct qman_portal *qm __always_unused,
        struct qman_fq *outq,
        const struct qm_dqrr_entry *dqrr,
        void **bufs)
{
    const struct qm_fd *fd;
    struct dpaa_sec_job *job;
    struct dpaa_sec_op_ctx *ctx;
    struct rte_event *ev = (struct rte_event *)event;

    fd = &dqrr->fd;

    /* sg is embedded in an op ctx,
     * sg[0] is for output
     * sg[1] for input
     */
    job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

    ctx = container_of(job, struct dpaa_sec_op_ctx, job);
    ctx->fd_status = fd->status;
    if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
        struct qm_sg_entry *sg_out;
        uint32_t len;

        sg_out = &job->sg[0];
        hw_sg_to_cpu(sg_out);
        len = sg_out->length;
        ctx->op->sym->m_src->pkt_len = len;
        ctx->op->sym->m_src->data_len = len;
    }
    if (!ctx->fd_status) {
        ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
    } else {
        DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
        ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
    }
    ev->event_ptr = (void *)ctx->op;

    ev->flow_id = outq->ev.flow_id;
    ev->sub_event_type = outq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = outq->ev.sched_type;
    ev->queue_id = outq->ev.queue_id;
    ev->priority = outq->ev.priority;
    *bufs = (void *)ctx->op;

    rte_mempool_put(ctx->ctx_pool, (void *)ctx);

    return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
        struct qman_portal *qm __rte_unused,
        struct qman_fq *outq,
        const struct qm_dqrr_entry *dqrr,
        void **bufs)
{
    u8 index;
    const struct qm_fd *fd;
    struct dpaa_sec_job *job;
    struct dpaa_sec_op_ctx *ctx;
    struct rte_event *ev = (struct rte_event *)event;

    fd = &dqrr->fd;

    /* sg is embedded in an op ctx,
     * sg[0] is for output
     * sg[1] for input
     */
    job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

    ctx = container_of(job, struct dpaa_sec_op_ctx, job);
    ctx->fd_status = fd->status;
    if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
        struct qm_sg_entry *sg_out;
        uint32_t len;

        sg_out = &job->sg[0];
        hw_sg_to_cpu(sg_out);
        len = sg_out->length;
        ctx->op->sym->m_src->pkt_len = len;
        ctx->op->sym->m_src->data_len = len;
    }
    if (!ctx->fd_status) {
        ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
    } else {
        DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
        ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
    }
    ev->event_ptr = (void *)ctx->op;
    ev->flow_id = outq->ev.flow_id;
    ev->sub_event_type = outq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = outq->ev.sched_type;
    ev->queue_id = outq->ev.queue_id;
    ev->priority = outq->ev.priority;

    /* Save active dqrr entries */
    index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
    DPAA_PER_LCORE_DQRR_SIZE++;
    DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
    DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
    ev->impl_opaque = index + 1;
    ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
    *bufs = (void *)ctx->op;

    rte_mempool_put(ctx->ctx_pool, (void *)ctx);

    return qman_cb_dqrr_defer;
}

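/*
 * Note on the index math above (editor's illustration): DQRR entries sit in
 * a 64-byte-aligned ring of 16 slots inside the QMan portal, so shifting the
 * entry address right by 6 and masking with 15 recovers the ring slot. For
 * example, an entry at portal offset 0x1c0 maps to slot (0x1c0 >> 6) & 0xf
 * = 7; impl_opaque and seqn then carry 7 + 1 so that zero can mean "no held
 * entry".
 */
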
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
        int qp_id,
        uint16_t ch_id,
        const struct rte_event *event)
{
    struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
    struct qm_mcc_initfq opts = {0};

    int ret;

    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
               QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
    opts.fqd.dest.channel = ch_id;

    switch (event->sched_type) {
    case RTE_SCHED_TYPE_ATOMIC:
        opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
        /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
         * configuration with HOLD_ACTIVE setting
         */
        opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
        qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
        break;
    case RTE_SCHED_TYPE_ORDERED:
        DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
        return -1;
    default:
        opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
        qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
        break;
    }

    ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
    if (unlikely(ret)) {
        DPAA_SEC_ERR("unable to init caam source fq!");
        return ret;
    }

    memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

    return 0;
}

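/*
 * Usage sketch (illustrative only): dpaa_sec_eventq_attach() is normally
 * reached through the eventdev crypto adapter rather than called directly;
 * "adapter_id", "cryptodev_id" and "qp_id" below are hypothetical.
 *
 *    struct rte_event ev = {
 *        .queue_id = 0,
 *        .sched_type = RTE_SCHED_TYPE_PARALLEL,
 *    };
 *
 *    rte_event_crypto_adapter_queue_pair_add(adapter_id, cryptodev_id,
 *                        qp_id, &ev);
 */
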
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
        int qp_id)
{
    struct qm_mcc_initfq opts = {0};
    int ret;
    struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
               QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
    qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
    qp->outq.cb.ern = ern_sec_fq_handler;
    qman_retire_fq(&qp->outq, NULL);
    qman_oos_fq(&qp->outq);
    ret = qman_init_fq(&qp->outq, 0, &opts);
    if (ret)
        RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
    qp->outq.cb.dqrr = NULL;

    return ret;
}

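/*
 * Editor's note: the detach path re-registers the poll-mode handlers
 * (dqrr_out_fq_cb_rx and ern_sec_fq_handler), retires the FQ and takes it
 * out of service, then re-initializes it unscheduled; the dqrr callback is
 * cleared at the end so no stale eventdev callback can fire afterwards.
 */
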
static struct rte_cryptodev_ops crypto_ops = {
    .dev_configure = dpaa_sec_dev_configure,
    .dev_start = dpaa_sec_dev_start,
    .dev_stop = dpaa_sec_dev_stop,
    .dev_close = dpaa_sec_dev_close,
    .dev_infos_get = dpaa_sec_dev_infos_get,
    .queue_pair_setup = dpaa_sec_queue_pair_setup,
    .queue_pair_release = dpaa_sec_queue_pair_release,
    .queue_pair_count = dpaa_sec_queue_pair_count,
    .sym_session_get_size = dpaa_sec_sym_session_get_size,
    .sym_session_configure = dpaa_sec_sym_session_configure,
    .sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
    return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
    .session_create = dpaa_sec_security_session_create,
    .session_update = NULL,
    .session_stats_get = NULL,
    .session_destroy = dpaa_sec_security_session_destroy,
    .set_pkt_metadata = NULL,
    .capabilities_get = dpaa_sec_capabilities_get
};
#endif

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
    struct dpaa_sec_dev_private *internals;

    if (dev == NULL)
        return -ENODEV;

    internals = dev->data->dev_private;
    rte_free(dev->security_ctx);

    rte_free(internals);

    DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
              dev->data->name, rte_socket_id());

    return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
    struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
    struct rte_security_ctx *security_instance;
#endif
    struct dpaa_sec_qp *qp;
    uint32_t i, flags;
    int ret;

    PMD_INIT_FUNC_TRACE();

    cryptodev->driver_id = cryptodev_driver_id;
    cryptodev->dev_ops = &crypto_ops;

    cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
    cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
    cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
            RTE_CRYPTODEV_FF_HW_ACCELERATED |
            RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
            RTE_CRYPTODEV_FF_SECURITY |
            RTE_CRYPTODEV_FF_IN_PLACE_SGL |
            RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
            RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
            RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
            RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

    internals = cryptodev->data->dev_private;
    internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
    internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

    /*
     * For secondary processes, we don't initialise any further as primary
     * has already done this work. Only check we don't need a different
     * RX function
     */
    if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
        DPAA_SEC_WARN("Device already init by primary process");
        return 0;
    }
#ifdef RTE_LIBRTE_SECURITY
    /* Initialize security_ctx only for primary process*/
    security_instance = rte_malloc("rte_security_instances_ops",
                sizeof(struct rte_security_ctx), 0);
    if (security_instance == NULL)
        return -ENOMEM;
    security_instance->device = (void *)cryptodev;
    security_instance->ops = &dpaa_sec_security_ops;
    security_instance->sess_cnt = 0;
    cryptodev->security_ctx = security_instance;
#endif
    rte_spinlock_init(&internals->lock);
    for (i = 0; i < internals->max_nb_queue_pairs; i++) {
        /* init qman fq for queue pair */
        qp = &internals->qps[i];
        ret = dpaa_sec_init_tx(&qp->outq);
        if (ret) {
            DPAA_SEC_ERR("config tx of queue pair %d", i);
            goto init_error;
        }
    }

    flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
        QMAN_FQ_FLAG_TO_DCPORTAL;
    for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
        /* create rx qman fq for sessions*/
        ret = qman_create_fq(0, flags, &internals->inq[i]);
        if (unlikely(ret != 0)) {
            DPAA_SEC_ERR("sec qman_create_fq failed");
            goto init_error;
        }
    }

    RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
    return 0;

init_error:
    DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
    dpaa_sec_uninit(cryptodev);
    return -EFAULT;
}

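/*
 * Usage sketch (illustrative only): once dpaa_sec_dev_init() has run, an
 * application can look the device up by name and confirm the security
 * offload flag. The device name "dpaa_sec-1" is a hypothetical example.
 *
 *    struct rte_cryptodev_info info;
 *    int dev_id = rte_cryptodev_get_dev_id("dpaa_sec-1");
 *
 *    if (dev_id >= 0) {
 *        rte_cryptodev_info_get(dev_id, &info);
 *        if (info.feature_flags & RTE_CRYPTODEV_FF_SECURITY)
 *            printf("%s supports rte_security\n",
 *                   info.driver_name);
 *    }
 */
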
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
        struct rte_dpaa_device *dpaa_dev)
{
    struct rte_cryptodev *cryptodev;
    char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
    int retval;

    snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

    cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
    if (cryptodev == NULL)
        return -ENOMEM;

    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        cryptodev->data->dev_private = rte_zmalloc_socket(
                    "cryptodev private structure",
                    sizeof(struct dpaa_sec_dev_private),
                    RTE_CACHE_LINE_SIZE,
                    rte_socket_id());
        if (cryptodev->data->dev_private == NULL)
            rte_panic("Cannot allocate memzone for private "
                    "device data");
    }

    dpaa_dev->crypto_dev = cryptodev;
    cryptodev->device = &dpaa_dev->device;

    /* init user callbacks */
    TAILQ_INIT(&(cryptodev->link_intr_cbs));

    /* if sec device version is not configured */
    if (!rta_get_sec_era()) {
        const struct device_node *caam_node;

        for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
            const uint32_t *prop = of_get_property(caam_node,
                    "fsl,sec-era",
                    NULL);
            if (prop) {
                rta_set_sec_era(
                    INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
                break;
            }
        }
    }

    /* Invoke PMD device initialization function */
    retval = dpaa_sec_dev_init(cryptodev);
    if (retval == 0)
        return 0;

    /* In case of error, cleanup is done */
    if (rte_eal_process_type() == RTE_PROC_PRIMARY)
        rte_free(cryptodev->data->dev_private);

    rte_cryptodev_pmd_release_device(cryptodev);

    return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
    struct rte_cryptodev *cryptodev;
    int ret;

    cryptodev = dpaa_dev->crypto_dev;
    if (cryptodev == NULL)
        return -ENODEV;

    ret = dpaa_sec_uninit(cryptodev);
    if (ret)
        return ret;

    return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
    .drv_type = FSL_DPAA_CRYPTO,
    .driver = {
        .name = "DPAA SEC PMD"
    },
    .probe = cryptodev_dpaa_sec_probe,
    .remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
        cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
    dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
    if (dpaa_logtype_sec >= 0)
        rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
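
/*
 * Editor's note (hedged): with the "pmd.crypto.dpaa" logtype registered
 * above at NOTICE level, more verbose driver logs can typically be enabled
 * at run time through the EAL command line, e.g.:
 *
 *    ./app --log-level=pmd.crypto.dpaa:debug
 *
 * The exact option syntax depends on the DPDK version in use.
 */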