1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
34 /* RTA header files */
35 #include <desc/common.h>
36 #include <desc/algo.h>
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
41 #include <rte_dpaa_bus.h>
43 #include <dpaa_sec_event.h>
44 #include <dpaa_sec_log.h>
45 #include <dpaax_iova_table.h>
47 static uint8_t cryptodev_driver_id;
50 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
53 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
55 if (!ctx->fd_status) {
56 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
59 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
63 static inline struct dpaa_sec_op_ctx *
64 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
66 struct dpaa_sec_op_ctx *ctx;
69 retval = rte_mempool_get(
70 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
73 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
77 * Clear the SG memory: 16 SG entries of 16 bytes each (256 bytes).
78 * One call to dcbz_64() clears a 64-byte cache line, so calling it
79 * 4 times clears all the SG entries. dpaa_sec_alloc_ctx() is called
80 * for each packet, and memset() is costlier than dcbz_64().
82 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
83 dcbz_64(&ctx->job.sg[i]);
85 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
86 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
92 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
94 const struct qm_mr_entry *msg)
96 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
97 fq->fqid, msg->ern.rc, msg->ern.seqnum);
100 /* Initialize the queue with the CAAM channel as the destination so
101 * that all packets enqueued on this queue are dispatched to CAAM.
104 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
107 struct qm_mcc_initfq fq_opts;
111 /* Clear FQ options */
112 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
114 flags = QMAN_INITFQ_FLAG_SCHED;
115 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
116 QM_INITFQ_WE_CONTEXTB;
118 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
119 fq_opts.fqd.context_b = fqid_out;
120 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
121 fq_opts.fqd.dest.wq = 0;
123 fq_in->cb.ern = ern_sec_fq_handler;
125 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
127 ret = qman_init_fq(fq_in, flags, &fq_opts);
128 if (unlikely(ret != 0))
129 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
134 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
135 static enum qman_cb_dqrr_result
136 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
137 struct qman_fq *fq __always_unused,
138 const struct qm_dqrr_entry *dqrr)
140 const struct qm_fd *fd;
141 struct dpaa_sec_job *job;
142 struct dpaa_sec_op_ctx *ctx;
144 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
145 return qman_cb_dqrr_defer;
147 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
148 return qman_cb_dqrr_consume;
151 /* sg is embedded in an op ctx,
152 * sg[0] is for output,
153 * sg[1] for input
154 */
155 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
157 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
158 ctx->fd_status = fd->status;
159 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
160 struct qm_sg_entry *sg_out;
162 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
163 ctx->op->sym->m_src : ctx->op->sym->m_dst;
165 sg_out = &job->sg[0];
166 hw_sg_to_cpu(sg_out);
167 len = sg_out->length;
169 while (mbuf->next != NULL) {
170 len -= mbuf->data_len;
171 mbuf = mbuf->next;
172 }
173 mbuf->data_len = len;
175 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
176 dpaa_sec_op_ending(ctx);
178 return qman_cb_dqrr_consume;
181 /* the CAAM result is put into this queue */
183 dpaa_sec_init_tx(struct qman_fq *fq)
186 struct qm_mcc_initfq opts;
189 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
190 QMAN_FQ_FLAG_DYNAMIC_FQID;
192 ret = qman_create_fq(0, flags, fq);
194 DPAA_SEC_ERR("qman_create_fq failed");
198 memset(&opts, 0, sizeof(opts));
199 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
200 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
202 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
204 fq->cb.dqrr = dqrr_out_fq_cb_rx;
205 fq->cb.ern = ern_sec_fq_handler;
207 ret = qman_init_fq(fq, 0, &opts);
209 DPAA_SEC_ERR("unable to init caam source fq!");
216 static inline int is_aead(dpaa_sec_session *ses)
218 return ((ses->cipher_alg == 0) &&
219 (ses->auth_alg == 0) &&
220 (ses->aead_alg != 0));
223 static inline int is_encode(dpaa_sec_session *ses)
225 return ses->dir == DIR_ENC;
228 static inline int is_decode(dpaa_sec_session *ses)
230 return ses->dir == DIR_DEC;
233 #ifdef RTE_LIB_SECURITY
235 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
237 struct alginfo authdata = {0}, cipherdata = {0};
238 struct sec_cdb *cdb = &ses->cdb;
239 struct alginfo *p_authdata = NULL;
240 int32_t shared_desc_len = 0;
241 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
247 cipherdata.key = (size_t)ses->cipher_key.data;
248 cipherdata.keylen = ses->cipher_key.length;
249 cipherdata.key_enc_flags = 0;
250 cipherdata.key_type = RTA_DATA_IMM;
251 cipherdata.algtype = ses->cipher_key.alg;
252 cipherdata.algmode = ses->cipher_key.algmode;
255 authdata.key = (size_t)ses->auth_key.data;
256 authdata.keylen = ses->auth_key.length;
257 authdata.key_enc_flags = 0;
258 authdata.key_type = RTA_DATA_IMM;
259 authdata.algtype = ses->auth_key.alg;
260 authdata.algmode = ses->auth_key.algmode;
262 p_authdata = &authdata;
265 if (rta_inline_pdcp_query(authdata.algtype,
268 ses->pdcp.hfn_ovd)) {
270 (size_t)rte_dpaa_mem_vtop((void *)
271 (size_t)cipherdata.key);
272 cipherdata.key_type = RTA_DATA_PTR;
275 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
276 if (ses->dir == DIR_ENC)
277 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
278 cdb->sh_desc, 1, swap,
283 ses->pdcp.hfn_threshold,
284 &cipherdata, &authdata,
286 else if (ses->dir == DIR_DEC)
287 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
288 cdb->sh_desc, 1, swap,
293 ses->pdcp.hfn_threshold,
294 &cipherdata, &authdata,
297 if (ses->dir == DIR_ENC) {
298 if (ses->pdcp.sdap_enabled)
300 cnstr_shdsc_pdcp_sdap_u_plane_encap(
301 cdb->sh_desc, 1, swap,
306 ses->pdcp.hfn_threshold,
307 &cipherdata, p_authdata, 0);
310 cnstr_shdsc_pdcp_u_plane_encap(
311 cdb->sh_desc, 1, swap,
316 ses->pdcp.hfn_threshold,
317 &cipherdata, p_authdata, 0);
318 } else if (ses->dir == DIR_DEC) {
319 if (ses->pdcp.sdap_enabled)
321 cnstr_shdsc_pdcp_sdap_u_plane_decap(
322 cdb->sh_desc, 1, swap,
327 ses->pdcp.hfn_threshold,
328 &cipherdata, p_authdata, 0);
331 cnstr_shdsc_pdcp_u_plane_decap(
332 cdb->sh_desc, 1, swap,
337 ses->pdcp.hfn_threshold,
338 &cipherdata, p_authdata, 0);
341 return shared_desc_len;
344 /* prepare the IPsec protocol command block of the session */
346 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
348 struct alginfo cipherdata = {0}, authdata = {0};
349 struct sec_cdb *cdb = &ses->cdb;
350 int32_t shared_desc_len = 0;
352 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
358 cipherdata.key = (size_t)ses->cipher_key.data;
359 cipherdata.keylen = ses->cipher_key.length;
360 cipherdata.key_enc_flags = 0;
361 cipherdata.key_type = RTA_DATA_IMM;
362 cipherdata.algtype = ses->cipher_key.alg;
363 cipherdata.algmode = ses->cipher_key.algmode;
365 if (ses->auth_key.length) {
366 authdata.key = (size_t)ses->auth_key.data;
367 authdata.keylen = ses->auth_key.length;
368 authdata.key_enc_flags = 0;
369 authdata.key_type = RTA_DATA_IMM;
370 authdata.algtype = ses->auth_key.alg;
371 authdata.algmode = ses->auth_key.algmode;
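/*
 * Inline-key decision: sh_desc[0]/sh_desc[1] feed the key lengths to
 * rta_inline_query(), which reports in sh_desc[2] which keys still fit
 * inline in the shared descriptor -- bit 0 for the cipher key, bit 1
 * for the auth key. A key that does not fit is referenced by IOVA
 * (RTA_DATA_PTR) instead.
 */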
374 cdb->sh_desc[0] = cipherdata.keylen;
375 cdb->sh_desc[1] = authdata.keylen;
376 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
378 (unsigned int *)cdb->sh_desc,
379 &cdb->sh_desc[2], 2);
382 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
385 if (cdb->sh_desc[2] & 1)
386 cipherdata.key_type = RTA_DATA_IMM;
388 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
389 (void *)(size_t)cipherdata.key);
390 cipherdata.key_type = RTA_DATA_PTR;
392 if (cdb->sh_desc[2] & (1<<1))
393 authdata.key_type = RTA_DATA_IMM;
395 authdata.key = (size_t)rte_dpaa_mem_vtop(
396 (void *)(size_t)authdata.key);
397 authdata.key_type = RTA_DATA_PTR;
403 if (ses->dir == DIR_ENC) {
404 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
406 true, swap, SHR_SERIAL,
408 (uint8_t *)&ses->ip4_hdr,
409 &cipherdata, &authdata);
410 } else if (ses->dir == DIR_DEC) {
411 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
413 true, swap, SHR_SERIAL,
415 &cipherdata, &authdata);
417 return shared_desc_len;
420 /* prepare the command block of the session */
422 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
424 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
425 int32_t shared_desc_len = 0;
426 struct sec_cdb *cdb = &ses->cdb;
428 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
434 memset(cdb, 0, sizeof(struct sec_cdb));
437 #ifdef RTE_LIB_SECURITY
439 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
442 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
445 case DPAA_SEC_CIPHER:
446 alginfo_c.key = (size_t)ses->cipher_key.data;
447 alginfo_c.keylen = ses->cipher_key.length;
448 alginfo_c.key_enc_flags = 0;
449 alginfo_c.key_type = RTA_DATA_IMM;
450 alginfo_c.algtype = ses->cipher_key.alg;
451 alginfo_c.algmode = ses->cipher_key.algmode;
453 switch (ses->cipher_alg) {
454 case RTE_CRYPTO_CIPHER_AES_CBC:
455 case RTE_CRYPTO_CIPHER_3DES_CBC:
456 case RTE_CRYPTO_CIPHER_AES_CTR:
457 case RTE_CRYPTO_CIPHER_3DES_CTR:
458 shared_desc_len = cnstr_shdsc_blkcipher(
460 swap, SHR_NEVER, &alginfo_c,
464 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
465 shared_desc_len = cnstr_shdsc_snow_f8(
466 cdb->sh_desc, true, swap,
470 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
471 shared_desc_len = cnstr_shdsc_zuce(
472 cdb->sh_desc, true, swap,
477 DPAA_SEC_ERR("unsupported cipher alg %d",
483 alginfo_a.key = (size_t)ses->auth_key.data;
484 alginfo_a.keylen = ses->auth_key.length;
485 alginfo_a.key_enc_flags = 0;
486 alginfo_a.key_type = RTA_DATA_IMM;
487 alginfo_a.algtype = ses->auth_key.alg;
488 alginfo_a.algmode = ses->auth_key.algmode;
489 switch (ses->auth_alg) {
490 case RTE_CRYPTO_AUTH_MD5_HMAC:
491 case RTE_CRYPTO_AUTH_SHA1_HMAC:
492 case RTE_CRYPTO_AUTH_SHA224_HMAC:
493 case RTE_CRYPTO_AUTH_SHA256_HMAC:
494 case RTE_CRYPTO_AUTH_SHA384_HMAC:
495 case RTE_CRYPTO_AUTH_SHA512_HMAC:
496 shared_desc_len = cnstr_shdsc_hmac(
498 swap, SHR_NEVER, &alginfo_a,
502 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
503 shared_desc_len = cnstr_shdsc_snow_f9(
504 cdb->sh_desc, true, swap,
509 case RTE_CRYPTO_AUTH_ZUC_EIA3:
510 shared_desc_len = cnstr_shdsc_zuca(
511 cdb->sh_desc, true, swap,
517 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
521 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
522 DPAA_SEC_ERR("not supported aead alg");
525 alginfo.key = (size_t)ses->aead_key.data;
526 alginfo.keylen = ses->aead_key.length;
527 alginfo.key_enc_flags = 0;
528 alginfo.key_type = RTA_DATA_IMM;
529 alginfo.algtype = ses->aead_key.alg;
530 alginfo.algmode = ses->aead_key.algmode;
532 if (ses->dir == DIR_ENC)
533 shared_desc_len = cnstr_shdsc_gcm_encap(
534 cdb->sh_desc, true, swap, SHR_NEVER,
539 shared_desc_len = cnstr_shdsc_gcm_decap(
540 cdb->sh_desc, true, swap, SHR_NEVER,
545 case DPAA_SEC_CIPHER_HASH:
546 alginfo_c.key = (size_t)ses->cipher_key.data;
547 alginfo_c.keylen = ses->cipher_key.length;
548 alginfo_c.key_enc_flags = 0;
549 alginfo_c.key_type = RTA_DATA_IMM;
550 alginfo_c.algtype = ses->cipher_key.alg;
551 alginfo_c.algmode = ses->cipher_key.algmode;
553 alginfo_a.key = (size_t)ses->auth_key.data;
554 alginfo_a.keylen = ses->auth_key.length;
555 alginfo_a.key_enc_flags = 0;
556 alginfo_a.key_type = RTA_DATA_IMM;
557 alginfo_a.algtype = ses->auth_key.alg;
558 alginfo_a.algmode = ses->auth_key.algmode;
560 cdb->sh_desc[0] = alginfo_c.keylen;
561 cdb->sh_desc[1] = alginfo_a.keylen;
562 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
564 (unsigned int *)cdb->sh_desc,
565 &cdb->sh_desc[2], 2);
568 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
571 if (cdb->sh_desc[2] & 1)
572 alginfo_c.key_type = RTA_DATA_IMM;
574 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
575 (void *)(size_t)alginfo_c.key);
576 alginfo_c.key_type = RTA_DATA_PTR;
578 if (cdb->sh_desc[2] & (1<<1))
579 alginfo_a.key_type = RTA_DATA_IMM;
581 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
582 (void *)(size_t)alginfo_a.key);
583 alginfo_a.key_type = RTA_DATA_PTR;
588 /* Auth_only_len is set to 0 here; it will be
589 * overwritten in the fd for each packet.
591 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
592 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
594 ses->digest_length, ses->dir);
596 case DPAA_SEC_HASH_CIPHER:
598 DPAA_SEC_ERR("error: Unsupported session");
602 if (shared_desc_len < 0) {
603 DPAA_SEC_ERR("error in preparing command block");
604 return shared_desc_len;
607 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
608 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
609 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
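/*
 * The SEC block parses the shared-descriptor header in big-endian byte
 * order, hence the rte_cpu_to_be_32() conversions above.
 */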
614 /* qp is lockless; it must be accessed by only one thread */
616 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
619 unsigned int pkts = 0;
620 int num_rx_bufs, ret;
621 struct qm_dqrr_entry *dq;
622 uint32_t vdqcr_flags = 0;
626 * For requests of up to four buffers, we provide the exact number of
627 * buffers. Otherwise we do not set the QM_VDQCR_EXACT flag.
628 * Without the QM_VDQCR_EXACT flag, QMan can provide up to two more
629 * buffers than requested, so we request two fewer in that case.
632 vdqcr_flags = QM_VDQCR_EXACT;
633 num_rx_bufs = nb_ops;
635 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
636 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
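/*
 * Worked example (illustrative): nb_ops = 3 requests exactly 3 frames;
 * nb_ops = 10 requests 8 and QMan may return up to 10; a very large
 * nb_ops is capped at DPAA_MAX_DEQUEUE_NUM_FRAMES - 2.
 */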
638 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
643 const struct qm_fd *fd;
644 struct dpaa_sec_job *job;
645 struct dpaa_sec_op_ctx *ctx;
646 struct rte_crypto_op *op;
648 dq = qman_dequeue(fq);
653 /* sg is embedded in an op ctx,
654 * sg[0] is for output,
655 * sg[1] for input
656 */
657 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
659 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
660 ctx->fd_status = fd->status;
662 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
663 struct qm_sg_entry *sg_out;
665 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
666 op->sym->m_src : op->sym->m_dst;
668 sg_out = &job->sg[0];
669 hw_sg_to_cpu(sg_out);
670 len = sg_out->length;
672 while (mbuf->next != NULL) {
673 len -= mbuf->data_len;
674 mbuf = mbuf->next;
675 }
676 mbuf->data_len = len;
678 if (!ctx->fd_status) {
679 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
681 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
682 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
686 /* report op status to sym->op and then free the ctx memory */
687 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
689 qman_dqrr_consume(fq, dq);
690 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
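/*
 * Layout sketch of the compound frame built by the build_*() helpers
 * below (derived from this file; the number of trailing entries varies
 * per operation):
 *
 *	cf->sg[0]   output entry (an extension pointing to a scatter
 *	            list in the SG variants)
 *	cf->sg[1]   input entry with extension = 1 and final = 1,
 *	            pointing to cf->sg[2..]
 *	cf->sg[2..] per-input entries: IV, optional AAD, data segments,
 *	            and the saved digest on decode
 *
 * dpaa_sec_enqueue_burst() then points the FD at cf->sg with the
 * compound format and a length of two SG entries.
 */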
695 static inline struct dpaa_sec_job *
696 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
698 struct rte_crypto_sym_op *sym = op->sym;
699 struct rte_mbuf *mbuf = sym->m_src;
700 struct dpaa_sec_job *cf;
701 struct dpaa_sec_op_ctx *ctx;
702 struct qm_sg_entry *sg, *out_sg, *in_sg;
703 phys_addr_t start_addr;
704 uint8_t *old_digest, extra_segs;
705 int data_len, data_offset;
707 data_len = sym->auth.data.length;
708 data_offset = sym->auth.data.offset;
710 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
711 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
712 if ((data_len & 7) || (data_offset & 7)) {
713 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
717 data_len = data_len >> 3;
718 data_offset = data_offset >> 3;
726 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
727 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
731 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
737 old_digest = ctx->digest;
741 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
742 out_sg->length = ses->digest_length;
743 cpu_to_hw_sg(out_sg);
747 /* need to extend the input to a compound frame */
748 in_sg->extension = 1;
750 in_sg->length = data_len;
751 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
756 if (ses->iv.length) {
759 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
762 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
763 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
765 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
766 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
769 sg->length = ses->iv.length;
771 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
772 in_sg->length += sg->length;
777 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
778 sg->offset = data_offset;
780 if (data_len <= (mbuf->data_len - data_offset)) {
781 sg->length = data_len;
783 sg->length = mbuf->data_len - data_offset;
785 /* remaining i/p segs */
786 while ((data_len = data_len - sg->length) &&
787 (mbuf = mbuf->next)) {
790 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
791 if (data_len > mbuf->data_len)
792 sg->length = mbuf->data_len;
794 sg->length = data_len;
798 if (is_decode(ses)) {
799 /* Digest verification case */
802 rte_memcpy(old_digest, sym->auth.digest.data,
804 start_addr = rte_dpaa_mem_vtop(old_digest);
805 qm_sg_entry_set64(sg, start_addr);
806 sg->length = ses->digest_length;
807 in_sg->length += ses->digest_length;
818 * |<----data_len------->|
819 * |ip_header|ah_header|icv|payload|
824 static inline struct dpaa_sec_job *
825 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
827 struct rte_crypto_sym_op *sym = op->sym;
828 struct rte_mbuf *mbuf = sym->m_src;
829 struct dpaa_sec_job *cf;
830 struct dpaa_sec_op_ctx *ctx;
831 struct qm_sg_entry *sg, *in_sg;
832 rte_iova_t start_addr;
834 int data_len, data_offset;
836 data_len = sym->auth.data.length;
837 data_offset = sym->auth.data.offset;
839 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
840 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
841 if ((data_len & 7) || (data_offset & 7)) {
842 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
846 data_len = data_len >> 3;
847 data_offset = data_offset >> 3;
850 ctx = dpaa_sec_alloc_ctx(ses, 4);
856 old_digest = ctx->digest;
858 start_addr = rte_pktmbuf_iova(mbuf);
861 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
862 sg->length = ses->digest_length;
867 /* need to extend the input to a compound frame */
868 in_sg->extension = 1;
870 in_sg->length = data_len;
871 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
874 if (ses->iv.length) {
877 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
880 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
881 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
883 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
884 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
887 sg->length = ses->iv.length;
889 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
890 in_sg->length += sg->length;
895 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
896 sg->offset = data_offset;
897 sg->length = data_len;
899 if (is_decode(ses)) {
900 /* Digest verification case */
902 /* hash result or digest, save the digest first */
903 rte_memcpy(old_digest, sym->auth.digest.data,
905 /* let the hardware verify the digest */
906 start_addr = rte_dpaa_mem_vtop(old_digest);
908 qm_sg_entry_set64(sg, start_addr);
909 sg->length = ses->digest_length;
910 in_sg->length += ses->digest_length;
919 static inline struct dpaa_sec_job *
920 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
922 struct rte_crypto_sym_op *sym = op->sym;
923 struct dpaa_sec_job *cf;
924 struct dpaa_sec_op_ctx *ctx;
925 struct qm_sg_entry *sg, *out_sg, *in_sg;
926 struct rte_mbuf *mbuf;
928 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
930 int data_len, data_offset;
932 data_len = sym->cipher.data.length;
933 data_offset = sym->cipher.data.offset;
935 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
936 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
937 if ((data_len & 7) || (data_offset & 7)) {
938 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
942 data_len = data_len >> 3;
943 data_offset = data_offset >> 3;
948 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
951 req_segs = mbuf->nb_segs * 2 + 3;
953 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
954 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
959 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
968 out_sg->extension = 1;
969 out_sg->length = data_len;
970 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
971 cpu_to_hw_sg(out_sg);
975 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
976 sg->length = mbuf->data_len - data_offset;
977 sg->offset = data_offset;
979 /* Successive segs */
984 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
985 sg->length = mbuf->data_len;
994 in_sg->extension = 1;
996 in_sg->length = data_len + ses->iv.length;
999 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1000 cpu_to_hw_sg(in_sg);
1003 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1004 sg->length = ses->iv.length;
1009 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1010 sg->length = mbuf->data_len - data_offset;
1011 sg->offset = data_offset;
1013 /* Successive segs */
1018 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1019 sg->length = mbuf->data_len;
1028 static inline struct dpaa_sec_job *
1029 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1031 struct rte_crypto_sym_op *sym = op->sym;
1032 struct dpaa_sec_job *cf;
1033 struct dpaa_sec_op_ctx *ctx;
1034 struct qm_sg_entry *sg;
1035 rte_iova_t src_start_addr, dst_start_addr;
1036 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1038 int data_len, data_offset;
1040 data_len = sym->cipher.data.length;
1041 data_offset = sym->cipher.data.offset;
1043 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1044 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1045 if ((data_len & 7) || (data_offset & 7)) {
1046 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1050 data_len = data_len >> 3;
1051 data_offset = data_offset >> 3;
1054 ctx = dpaa_sec_alloc_ctx(ses, 4);
1061 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1064 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1066 dst_start_addr = src_start_addr;
1070 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1071 sg->length = data_len + ses->iv.length;
1077 /* need to extend the input to a compound frame */
1080 sg->length = data_len + ses->iv.length;
1081 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1085 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1086 sg->length = ses->iv.length;
1090 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1091 sg->length = data_len;
1098 static inline struct dpaa_sec_job *
1099 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1101 struct rte_crypto_sym_op *sym = op->sym;
1102 struct dpaa_sec_job *cf;
1103 struct dpaa_sec_op_ctx *ctx;
1104 struct qm_sg_entry *sg, *out_sg, *in_sg;
1105 struct rte_mbuf *mbuf;
1107 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1112 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1115 req_segs = mbuf->nb_segs * 2 + 4;
1118 if (ses->auth_only_len)
1121 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1122 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1127 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1134 rte_prefetch0(cf->sg);
1137 out_sg = &cf->sg[0];
1138 out_sg->extension = 1;
1140 out_sg->length = sym->aead.data.length + ses->digest_length;
1142 out_sg->length = sym->aead.data.length;
1144 /* output sg entries */
1146 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1147 cpu_to_hw_sg(out_sg);
1150 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1151 sg->length = mbuf->data_len - sym->aead.data.offset;
1152 sg->offset = sym->aead.data.offset;
1154 /* Successive segs */
1159 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1160 sg->length = mbuf->data_len;
1163 sg->length -= ses->digest_length;
1165 if (is_encode(ses)) {
1167 /* set auth output */
1169 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1170 sg->length = ses->digest_length;
1178 in_sg->extension = 1;
1181 in_sg->length = ses->iv.length + sym->aead.data.length
1182 + ses->auth_only_len;
1184 in_sg->length = ses->iv.length + sym->aead.data.length
1185 + ses->auth_only_len + ses->digest_length;
1187 /* input sg entries */
1189 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1190 cpu_to_hw_sg(in_sg);
1193 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1194 sg->length = ses->iv.length;
1197 /* 2nd seg auth only */
1198 if (ses->auth_only_len) {
1200 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1201 sg->length = ses->auth_only_len;
1207 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1208 sg->length = mbuf->data_len - sym->aead.data.offset;
1209 sg->offset = sym->aead.data.offset;
1211 /* Successive segs */
1216 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1217 sg->length = mbuf->data_len;
1221 if (is_decode(ses)) {
1224 memcpy(ctx->digest, sym->aead.digest.data,
1225 ses->digest_length);
1226 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1227 sg->length = ses->digest_length;
1235 static inline struct dpaa_sec_job *
1236 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1238 struct rte_crypto_sym_op *sym = op->sym;
1239 struct dpaa_sec_job *cf;
1240 struct dpaa_sec_op_ctx *ctx;
1241 struct qm_sg_entry *sg;
1242 uint32_t length = 0;
1243 rte_iova_t src_start_addr, dst_start_addr;
1244 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1247 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1250 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1252 dst_start_addr = src_start_addr;
1254 ctx = dpaa_sec_alloc_ctx(ses, 7);
1262 rte_prefetch0(cf->sg);
1264 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1265 if (is_encode(ses)) {
1266 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1267 sg->length = ses->iv.length;
1268 length += sg->length;
1272 if (ses->auth_only_len) {
1273 qm_sg_entry_set64(sg,
1274 rte_dpaa_mem_vtop(sym->aead.aad.data));
1275 sg->length = ses->auth_only_len;
1276 length += sg->length;
1280 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1281 sg->length = sym->aead.data.length;
1282 length += sg->length;
1286 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1287 sg->length = ses->iv.length;
1288 length += sg->length;
1292 if (ses->auth_only_len) {
1293 qm_sg_entry_set64(sg,
1294 rte_dpaa_mem_vtop(sym->aead.aad.data));
1295 sg->length = ses->auth_only_len;
1296 length += sg->length;
1300 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1301 sg->length = sym->aead.data.length;
1302 length += sg->length;
1305 memcpy(ctx->digest, sym->aead.digest.data,
1306 ses->digest_length);
1309 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1310 sg->length = ses->digest_length;
1311 length += sg->length;
1315 /* input compound frame */
1316 cf->sg[1].length = length;
1317 cf->sg[1].extension = 1;
1318 cf->sg[1].final = 1;
1319 cpu_to_hw_sg(&cf->sg[1]);
1323 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1324 qm_sg_entry_set64(sg,
1325 dst_start_addr + sym->aead.data.offset);
1326 sg->length = sym->aead.data.length;
1327 length = sg->length;
1328 if (is_encode(ses)) {
1330 /* set auth output */
1332 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1333 sg->length = ses->digest_length;
1334 length += sg->length;
1339 /* output compound frame */
1340 cf->sg[0].length = length;
1341 cf->sg[0].extension = 1;
1342 cpu_to_hw_sg(&cf->sg[0]);
1347 static inline struct dpaa_sec_job *
1348 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1350 struct rte_crypto_sym_op *sym = op->sym;
1351 struct dpaa_sec_job *cf;
1352 struct dpaa_sec_op_ctx *ctx;
1353 struct qm_sg_entry *sg, *out_sg, *in_sg;
1354 struct rte_mbuf *mbuf;
1356 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1361 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1364 req_segs = mbuf->nb_segs * 2 + 4;
1367 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1368 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1373 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1380 rte_prefetch0(cf->sg);
1383 out_sg = &cf->sg[0];
1384 out_sg->extension = 1;
1386 out_sg->length = sym->auth.data.length + ses->digest_length;
1388 out_sg->length = sym->auth.data.length;
1390 /* output sg entries */
1392 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1393 cpu_to_hw_sg(out_sg);
1396 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1397 sg->length = mbuf->data_len - sym->auth.data.offset;
1398 sg->offset = sym->auth.data.offset;
1400 /* Successive segs */
1405 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1406 sg->length = mbuf->data_len;
1409 sg->length -= ses->digest_length;
1411 if (is_encode(ses)) {
1413 /* set auth output */
1415 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1416 sg->length = ses->digest_length;
1424 in_sg->extension = 1;
1427 in_sg->length = ses->iv.length + sym->auth.data.length;
1429 in_sg->length = ses->iv.length + sym->auth.data.length
1430 + ses->digest_length;
1432 /* input sg entries */
1434 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1435 cpu_to_hw_sg(in_sg);
1438 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1439 sg->length = ses->iv.length;
1444 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1445 sg->length = mbuf->data_len - sym->auth.data.offset;
1446 sg->offset = sym->auth.data.offset;
1448 /* Successive segs */
1453 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1454 sg->length = mbuf->data_len;
1458 sg->length -= ses->digest_length;
1459 if (is_decode(ses)) {
1462 memcpy(ctx->digest, sym->auth.digest.data,
1463 ses->digest_length);
1464 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1465 sg->length = ses->digest_length;
1473 static inline struct dpaa_sec_job *
1474 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1476 struct rte_crypto_sym_op *sym = op->sym;
1477 struct dpaa_sec_job *cf;
1478 struct dpaa_sec_op_ctx *ctx;
1479 struct qm_sg_entry *sg;
1480 rte_iova_t src_start_addr, dst_start_addr;
1481 uint32_t length = 0;
1482 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1485 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1487 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1489 dst_start_addr = src_start_addr;
1491 ctx = dpaa_sec_alloc_ctx(ses, 7);
1499 rte_prefetch0(cf->sg);
1501 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1502 if (is_encode(ses)) {
1503 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1504 sg->length = ses->iv.length;
1505 length += sg->length;
1509 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1510 sg->length = sym->auth.data.length;
1511 length += sg->length;
1515 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1516 sg->length = ses->iv.length;
1517 length += sg->length;
1522 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1523 sg->length = sym->auth.data.length;
1524 length += sg->length;
1527 memcpy(ctx->digest, sym->auth.digest.data,
1528 ses->digest_length);
1531 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1532 sg->length = ses->digest_length;
1533 length += sg->length;
1537 /* input compound frame */
1538 cf->sg[1].length = length;
1539 cf->sg[1].extension = 1;
1540 cf->sg[1].final = 1;
1541 cpu_to_hw_sg(&cf->sg[1]);
1545 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1546 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1547 sg->length = sym->cipher.data.length;
1548 length = sg->length;
1549 if (is_encode(ses)) {
1551 /* set auth output */
1553 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1554 sg->length = ses->digest_length;
1555 length += sg->length;
1560 /* output compound frame */
1561 cf->sg[0].length = length;
1562 cf->sg[0].extension = 1;
1563 cpu_to_hw_sg(&cf->sg[0]);
1568 #ifdef RTE_LIB_SECURITY
1569 static inline struct dpaa_sec_job *
1570 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1572 struct rte_crypto_sym_op *sym = op->sym;
1573 struct dpaa_sec_job *cf;
1574 struct dpaa_sec_op_ctx *ctx;
1575 struct qm_sg_entry *sg;
1576 phys_addr_t src_start_addr, dst_start_addr;
1578 ctx = dpaa_sec_alloc_ctx(ses, 2);
1584 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1587 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1589 dst_start_addr = src_start_addr;
1593 qm_sg_entry_set64(sg, src_start_addr);
1594 sg->length = sym->m_src->pkt_len;
1598 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1601 qm_sg_entry_set64(sg, dst_start_addr);
1602 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1608 static inline struct dpaa_sec_job *
1609 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1611 struct rte_crypto_sym_op *sym = op->sym;
1612 struct dpaa_sec_job *cf;
1613 struct dpaa_sec_op_ctx *ctx;
1614 struct qm_sg_entry *sg, *out_sg, *in_sg;
1615 struct rte_mbuf *mbuf;
1617 uint32_t in_len = 0, out_len = 0;
1624 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1625 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1626 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1631 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1637 out_sg = &cf->sg[0];
1638 out_sg->extension = 1;
1639 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1643 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1646 /* Successive segs */
1647 while (mbuf->next) {
1648 sg->length = mbuf->data_len;
1649 out_len += sg->length;
1653 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1656 sg->length = mbuf->buf_len - mbuf->data_off;
1657 out_len += sg->length;
1661 out_sg->length = out_len;
1662 cpu_to_hw_sg(out_sg);
1667 in_sg->extension = 1;
1669 in_len = mbuf->data_len;
1672 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1675 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1676 sg->length = mbuf->data_len;
1679 /* Successive segs */
1684 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1685 sg->length = mbuf->data_len;
1687 in_len += sg->length;
1693 in_sg->length = in_len;
1694 cpu_to_hw_sg(in_sg);
1696 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1703 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1706 /* Function to transmit the frames to the given device and queue pair */
1708 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1709 uint16_t num_tx = 0;
1710 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1711 uint32_t frames_to_send;
1712 struct rte_crypto_op *op;
1713 struct dpaa_sec_job *cf;
1714 dpaa_sec_session *ses;
1715 uint16_t auth_hdr_len, auth_tail_len;
1716 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1717 struct qman_fq *inq[DPAA_SEC_BURST];
1720 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1721 DPAA_SEC_BURST : nb_ops;
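/*
 * If the mbuf carries a DQRR sequence number (an event-dequeued
 * frame), fold its DQRR index into the enqueue flags as a DCA
 * (discrete consumption acknowledgment) so that the held DQRR entry
 * is consumed by the enqueue itself.
 */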
1722 for (loop = 0; loop < frames_to_send; loop++) {
1724 if (*dpaa_seqn(op->sym->m_src) != 0) {
1725 index = *dpaa_seqn(op->sym->m_src) - 1;
1726 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1727 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1728 flags[loop] = ((index & 0x0f) << 8);
1729 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1730 DPAA_PER_LCORE_DQRR_SIZE--;
1731 DPAA_PER_LCORE_DQRR_HELD &=
1736 switch (op->sess_type) {
1737 case RTE_CRYPTO_OP_WITH_SESSION:
1738 ses = (dpaa_sec_session *)
1739 get_sym_session_private_data(
1741 cryptodev_driver_id);
1743 #ifdef RTE_LIB_SECURITY
1744 case RTE_CRYPTO_OP_SECURITY_SESSION:
1745 ses = (dpaa_sec_session *)
1746 get_sec_session_private_data(
1747 op->sym->sec_session);
1752 "sessionless crypto op not supported");
1753 frames_to_send = loop;
1759 DPAA_SEC_DP_ERR("session not available");
1760 frames_to_send = loop;
1765 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1766 if (dpaa_sec_attach_sess_q(qp, ses)) {
1767 frames_to_send = loop;
1771 } else if (unlikely(ses->qp[rte_lcore_id() %
1772 MAX_DPAA_CORES] != qp)) {
1773 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1775 ses->qp[rte_lcore_id() %
1776 MAX_DPAA_CORES], qp);
1777 frames_to_send = loop;
1782 auth_hdr_len = op->sym->auth.data.length -
1783 op->sym->cipher.data.length;
1786 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1787 ((op->sym->m_dst == NULL) ||
1788 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1789 switch (ses->ctxt) {
1790 #ifdef RTE_LIB_SECURITY
1792 case DPAA_SEC_IPSEC:
1793 cf = build_proto(op, ses);
1797 cf = build_auth_only(op, ses);
1799 case DPAA_SEC_CIPHER:
1800 cf = build_cipher_only(op, ses);
1803 cf = build_cipher_auth_gcm(op, ses);
1804 auth_hdr_len = ses->auth_only_len;
1806 case DPAA_SEC_CIPHER_HASH:
1808 op->sym->cipher.data.offset
1809 - op->sym->auth.data.offset;
1811 op->sym->auth.data.length
1812 - op->sym->cipher.data.length
1814 cf = build_cipher_auth(op, ses);
1817 DPAA_SEC_DP_ERR("not supported ops");
1818 frames_to_send = loop;
1823 switch (ses->ctxt) {
1824 #ifdef RTE_LIB_SECURITY
1826 case DPAA_SEC_IPSEC:
1827 cf = build_proto_sg(op, ses);
1831 cf = build_auth_only_sg(op, ses);
1833 case DPAA_SEC_CIPHER:
1834 cf = build_cipher_only_sg(op, ses);
1837 cf = build_cipher_auth_gcm_sg(op, ses);
1838 auth_hdr_len = ses->auth_only_len;
1840 case DPAA_SEC_CIPHER_HASH:
1842 op->sym->cipher.data.offset
1843 - op->sym->auth.data.offset;
1845 op->sym->auth.data.length
1846 - op->sym->cipher.data.length
1848 cf = build_cipher_auth_sg(op, ses);
1851 DPAA_SEC_DP_ERR("not supported ops");
1852 frames_to_send = loop;
1857 if (unlikely(!cf)) {
1858 frames_to_send = loop;
1864 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1865 fd->opaque_addr = 0;
1867 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1868 fd->_format1 = qm_fd_compound;
1869 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1871 /* Auth_only_len is set to 0 in the descriptor and is
1872 * overwritten here in fd.cmd, which will update
1875 if (auth_hdr_len || auth_tail_len) {
1876 fd->cmd = 0x80000000;
1878 ((auth_tail_len << 16) | auth_hdr_len);
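/*
 * fd->cmd layout used above: bit 31 flags the override,
 * auth_tail_len occupies the upper half and auth_hdr_len
 * the lower 16 bits.
 */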
1881 #ifdef RTE_LIB_SECURITY
1882 /* In the case of PDCP, the per-packet HFN is stored in
1883 * the mbuf private area, after the sym_op.
1885 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1886 fd->cmd = 0x80000000 |
1887 *((uint32_t *)((uint8_t *)op +
1888 ses->pdcp.hfn_ovd_offset));
1889 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1890 *((uint32_t *)((uint8_t *)op +
1891 ses->pdcp.hfn_ovd_offset)),
1898 while (loop < frames_to_send) {
1899 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1900 &flags[loop], frames_to_send - loop);
1902 nb_ops -= frames_to_send;
1903 num_tx += frames_to_send;
1906 dpaa_qp->tx_pkts += num_tx;
1907 dpaa_qp->tx_errs += nb_ops - num_tx;
1913 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1917 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1919 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1921 dpaa_qp->rx_pkts += num_rx;
1922 dpaa_qp->rx_errs += nb_ops - num_rx;
1924 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1929 /** Release queue pair */
1931 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1934 struct dpaa_sec_dev_private *internals;
1935 struct dpaa_sec_qp *qp = NULL;
1937 PMD_INIT_FUNC_TRACE();
1939 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1941 internals = dev->data->dev_private;
1942 if (qp_id >= internals->max_nb_queue_pairs) {
1943 DPAA_SEC_ERR("Max supported qpid %d",
1944 internals->max_nb_queue_pairs);
1948 qp = &internals->qps[qp_id];
1949 rte_mempool_free(qp->ctx_pool);
1950 qp->internals = NULL;
1951 dev->data->queue_pairs[qp_id] = NULL;
1956 /** Setup a queue pair */
1958 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1959 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1960 __rte_unused int socket_id)
1962 struct dpaa_sec_dev_private *internals;
1963 struct dpaa_sec_qp *qp = NULL;
1966 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1968 internals = dev->data->dev_private;
1969 if (qp_id >= internals->max_nb_queue_pairs) {
1970 DPAA_SEC_ERR("Max supported qpid %d",
1971 internals->max_nb_queue_pairs);
1975 qp = &internals->qps[qp_id];
1976 qp->internals = internals;
1977 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1978 dev->data->dev_id, qp_id);
1979 if (!qp->ctx_pool) {
1980 qp->ctx_pool = rte_mempool_create((const char *)str,
1983 CTX_POOL_CACHE_SIZE, 0,
1984 NULL, NULL, NULL, NULL,
1986 if (!qp->ctx_pool) {
1987 DPAA_SEC_ERR("%s create failed\n", str);
1991 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1992 dev->data->dev_id, qp_id);
1993 dev->data->queue_pairs[qp_id] = qp;
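/*
 * Usage sketch (illustrative, hypothetical values): applications reach
 * this through the generic API; this PMD ignores qp_conf and socket_id:
 *
 *	struct rte_cryptodev_qp_conf conf = { .nb_descriptors = 2048 };
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &conf, rte_socket_id());
 */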
1998 /** Returns the size of the session structure */
2000 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2002 PMD_INIT_FUNC_TRACE();
2004 return sizeof(dpaa_sec_session);
2008 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2009 struct rte_crypto_sym_xform *xform,
2010 dpaa_sec_session *session)
2012 session->ctxt = DPAA_SEC_CIPHER;
2013 session->cipher_alg = xform->cipher.algo;
2014 session->iv.length = xform->cipher.iv.length;
2015 session->iv.offset = xform->cipher.iv.offset;
2016 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2017 RTE_CACHE_LINE_SIZE);
2018 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2019 DPAA_SEC_ERR("No Memory for cipher key");
2022 session->cipher_key.length = xform->cipher.key.length;
2024 memcpy(session->cipher_key.data, xform->cipher.key.data,
2025 xform->cipher.key.length);
2026 switch (xform->cipher.algo) {
2027 case RTE_CRYPTO_CIPHER_AES_CBC:
2028 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2029 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2031 case RTE_CRYPTO_CIPHER_3DES_CBC:
2032 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2033 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2035 case RTE_CRYPTO_CIPHER_AES_CTR:
2036 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2037 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2039 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2040 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2042 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2043 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2046 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2047 xform->cipher.algo);
2050 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2057 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2058 struct rte_crypto_sym_xform *xform,
2059 dpaa_sec_session *session)
2061 session->ctxt = DPAA_SEC_AUTH;
2062 session->auth_alg = xform->auth.algo;
2063 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2064 RTE_CACHE_LINE_SIZE);
2065 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2066 DPAA_SEC_ERR("No Memory for auth key");
2069 session->auth_key.length = xform->auth.key.length;
2070 session->digest_length = xform->auth.digest_length;
2071 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2072 session->iv.offset = xform->auth.iv.offset;
2073 session->iv.length = xform->auth.iv.length;
2076 memcpy(session->auth_key.data, xform->auth.key.data,
2077 xform->auth.key.length);
2079 switch (xform->auth.algo) {
2080 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2081 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2082 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2084 case RTE_CRYPTO_AUTH_MD5_HMAC:
2085 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2086 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2088 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2089 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2090 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2092 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2093 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2094 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2096 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2097 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2098 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2100 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2101 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2102 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2104 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2105 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2106 session->auth_key.algmode = OP_ALG_AAI_F9;
2108 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2109 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2110 session->auth_key.algmode = OP_ALG_AAI_F9;
2113 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2118 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2125 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2126 struct rte_crypto_sym_xform *xform,
2127 dpaa_sec_session *session)
2130 struct rte_crypto_cipher_xform *cipher_xform;
2131 struct rte_crypto_auth_xform *auth_xform;
2133 session->ctxt = DPAA_SEC_CIPHER_HASH;
2134 if (session->auth_cipher_text) {
2135 cipher_xform = &xform->cipher;
2136 auth_xform = &xform->next->auth;
2138 cipher_xform = &xform->next->cipher;
2139 auth_xform = &xform->auth;
2142 /* Set IV parameters */
2143 session->iv.offset = cipher_xform->iv.offset;
2144 session->iv.length = cipher_xform->iv.length;
2146 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2147 RTE_CACHE_LINE_SIZE);
2148 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2149 DPAA_SEC_ERR("No Memory for cipher key");
2152 session->cipher_key.length = cipher_xform->key.length;
2153 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2154 RTE_CACHE_LINE_SIZE);
2155 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2156 DPAA_SEC_ERR("No Memory for auth key");
2159 session->auth_key.length = auth_xform->key.length;
2160 memcpy(session->cipher_key.data, cipher_xform->key.data,
2161 cipher_xform->key.length);
2162 memcpy(session->auth_key.data, auth_xform->key.data,
2163 auth_xform->key.length);
2165 session->digest_length = auth_xform->digest_length;
2166 session->auth_alg = auth_xform->algo;
2168 switch (auth_xform->algo) {
2169 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2170 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2171 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2173 case RTE_CRYPTO_AUTH_MD5_HMAC:
2174 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2175 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2177 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2178 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2179 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2181 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2182 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2183 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2185 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2186 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2187 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2189 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2190 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2191 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2194 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2199 session->cipher_alg = cipher_xform->algo;
2201 switch (cipher_xform->algo) {
2202 case RTE_CRYPTO_CIPHER_AES_CBC:
2203 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2204 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2206 case RTE_CRYPTO_CIPHER_3DES_CBC:
2207 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2208 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2210 case RTE_CRYPTO_CIPHER_AES_CTR:
2211 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2212 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2215 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2216 cipher_xform->algo);
2219 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2225 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2226 struct rte_crypto_sym_xform *xform,
2227 dpaa_sec_session *session)
2229 session->aead_alg = xform->aead.algo;
2230 session->ctxt = DPAA_SEC_AEAD;
2231 session->iv.length = xform->aead.iv.length;
2232 session->iv.offset = xform->aead.iv.offset;
2233 session->auth_only_len = xform->aead.aad_length;
2234 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2235 RTE_CACHE_LINE_SIZE);
2236 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2237 DPAA_SEC_ERR("No Memory for aead key\n");
2240 session->aead_key.length = xform->aead.key.length;
2241 session->digest_length = xform->aead.digest_length;
2243 memcpy(session->aead_key.data, xform->aead.key.data,
2244 xform->aead.key.length);
2246 switch (session->aead_alg) {
2247 case RTE_CRYPTO_AEAD_AES_GCM:
2248 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2249 session->aead_key.algmode = OP_ALG_AAI_GCM;
2252 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2256 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
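/*
 * Example AEAD xform (illustrative; key_data and IV_OFFSET are
 * hypothetical) of the kind handled by dpaa_sec_aead_init() above:
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.key = { .data = key_data, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */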
2262 static struct qman_fq *
2263 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2267 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2268 if (qi->inq_attach[i] == 0) {
2269 qi->inq_attach[i] = 1;
2273 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
2279 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2283 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2284 if (&qi->inq[i] == fq) {
2285 if (qman_retire_fq(fq, NULL) != 0)
2286 DPAA_SEC_DEBUG("Queue is not retired\n");
2288 qi->inq_attach[i] = 0;
2296 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2300 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2301 ret = dpaa_sec_prep_cdb(sess);
2303 DPAA_SEC_ERR("Unable to prepare sec cdb");
2306 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2307 ret = rte_dpaa_portal_init((void *)0);
2309 DPAA_SEC_ERR("Failure in affining portal");
2313 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2314 rte_dpaa_mem_vtop(&sess->cdb),
2315 qman_fq_fqid(&qp->outq));
2317 DPAA_SEC_ERR("Unable to init sec queue");
2323 free_session_data(dpaa_sec_session *s)
2326 rte_free(s->aead_key.data);
2328 rte_free(s->auth_key.data);
2329 rte_free(s->cipher_key.data);
2331 memset(s, 0, sizeof(dpaa_sec_session));
2335 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2336 struct rte_crypto_sym_xform *xform, void *sess)
2338 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2339 dpaa_sec_session *session = sess;
2343 PMD_INIT_FUNC_TRACE();
2345 if (unlikely(sess == NULL)) {
2346 DPAA_SEC_ERR("invalid session struct");
2349 memset(session, 0, sizeof(dpaa_sec_session));
2351 /* Default IV length = 0 */
2352 session->iv.length = 0;
2355 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2356 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2357 ret = dpaa_sec_cipher_init(dev, xform, session);
2359 /* Authentication Only */
2360 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2361 xform->next == NULL) {
2362 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2363 session->ctxt = DPAA_SEC_AUTH;
2364 ret = dpaa_sec_auth_init(dev, xform, session);
2366 /* Cipher then Authenticate */
2367 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2368 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2369 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2370 session->auth_cipher_text = 1;
2371 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2372 ret = dpaa_sec_auth_init(dev, xform, session);
2373 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2374 ret = dpaa_sec_cipher_init(dev, xform, session);
2376 ret = dpaa_sec_chain_init(dev, xform, session);
2378 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2381 /* Authenticate then Cipher */
2382 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2383 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2384 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2385 session->auth_cipher_text = 0;
2386 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2387 ret = dpaa_sec_cipher_init(dev, xform, session);
2388 else if (xform->next->cipher.algo
2389 == RTE_CRYPTO_CIPHER_NULL)
2390 ret = dpaa_sec_auth_init(dev, xform, session);
2392 ret = dpaa_sec_chain_init(dev, xform, session);
2394 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2398 /* AEAD operation for AES-GCM-like algorithms */
2399 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2400 xform->next == NULL) {
2401 ret = dpaa_sec_aead_init(dev, xform, session);
2404 DPAA_SEC_ERR("Invalid crypto type");
2408 DPAA_SEC_ERR("unable to init session");
2412 rte_spinlock_lock(&internals->lock);
2413 for (i = 0; i < MAX_DPAA_CORES; i++) {
2414 session->inq[i] = dpaa_sec_attach_rxq(internals);
2415 if (session->inq[i] == NULL) {
2416 DPAA_SEC_ERR("unable to attach sec queue");
2417 rte_spinlock_unlock(&internals->lock);
2422 rte_spinlock_unlock(&internals->lock);
2427 free_session_data(session);
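/*
 * Example xform chain (illustrative): AES-CBC encryption followed by
 * SHA1-HMAC generation. dpaa_sec_set_session_parameters() above
 * classifies this as cipher-then-auth and calls dpaa_sec_chain_init():
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			  .op = RTE_CRYPTO_AUTH_OP_GENERATE, },
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT, },
 *	};
 */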
2432 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2433 struct rte_crypto_sym_xform *xform,
2434 struct rte_cryptodev_sym_session *sess,
2435 struct rte_mempool *mempool)
2437 void *sess_private_data;
2440 PMD_INIT_FUNC_TRACE();
2442 if (rte_mempool_get(mempool, &sess_private_data)) {
2443 DPAA_SEC_ERR("Couldn't get object from session mempool");
2447 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2449 DPAA_SEC_ERR("failed to configure session parameters");
2451 /* Return session to mempool */
2452 rte_mempool_put(mempool, sess_private_data);
2456 set_sym_session_private_data(sess, dev->driver_id,
2464 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2466 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2467 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2470 for (i = 0; i < MAX_DPAA_CORES; i++) {
2472 dpaa_sec_detach_rxq(qi, s->inq[i]);
2476 free_session_data(s);
2477 rte_mempool_put(sess_mp, (void *)s);
2480 /** Clear the session memory so it doesn't leave key material behind */
2482 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2483 struct rte_cryptodev_sym_session *sess)
2485 PMD_INIT_FUNC_TRACE();
2486 uint8_t index = dev->driver_id;
2487 void *sess_priv = get_sym_session_private_data(sess, index);
2488 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2491 free_session_memory(dev, s);
2492 set_sym_session_private_data(sess, index, NULL);
2496 #ifdef RTE_LIB_SECURITY
2498 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2499 struct rte_security_ipsec_xform *ipsec_xform,
2500 dpaa_sec_session *session)
2502 PMD_INIT_FUNC_TRACE();
2504 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2505 RTE_CACHE_LINE_SIZE);
2506 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2507 DPAA_SEC_ERR("No Memory for aead key");
2510 memcpy(session->aead_key.data, aead_xform->key.data,
2511 aead_xform->key.length);
2513 session->digest_length = aead_xform->digest_length;
2514 session->aead_key.length = aead_xform->key.length;
2516 switch (aead_xform->algo) {
2517 case RTE_CRYPTO_AEAD_AES_GCM:
2518 switch (session->digest_length) {
2520 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2523 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2526 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2529 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2530 session->digest_length);
2533 if (session->dir == DIR_ENC) {
2534 memcpy(session->encap_pdb.gcm.salt,
2535 (uint8_t *)&(ipsec_xform->salt), 4);
2537 memcpy(session->decap_pdb.gcm.salt,
2538 (uint8_t *)&(ipsec_xform->salt), 4);
2540 session->aead_key.algmode = OP_ALG_AAI_GCM;
2541 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2544 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",

static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard,"
			"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
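
/*
 * Illustrative sketch (names and key/digest lengths are assumptions): the
 * cipher/auth combinations validated above typically arrive as a chained
 * symmetric xform, e.g. AES-CBC encryption followed by HMAC-SHA1-96:
 *
 *	static uint8_t cbc_key[16], hmac_key[20];
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = sizeof(hmac_key) },
 *			.digest_length = 12,	// HMAC-SHA1-96 truncation
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xform,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cbc_key, .length = sizeof(cbc_key) },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */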

static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else {
		goto out;
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
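
/*
 * Illustrative sketch (SPI and salt are assumed values): a configuration
 * that exercises the ingress decap and anti-replay paths above. Note that
 * replay_win_sz is rounded up to a power of two, with <= 32 selecting
 * ARS32, 64 selecting ARS64 and anything larger selecting ARS128:
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.salt = 0x12345678,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *			.replay_win_sz = 64,	// -> PDBOPTS_ESP_ARS64
 *		},
 *		.crypto_xform = &cipher_xform,	// chained xform from above
 *	};
 */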

static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xform types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     session->cipher_alg);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			return -EINVAL;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     session->auth_alg);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
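
/*
 * Illustrative sketch (bearer, HFN and algorithm choices are assumptions):
 * a data-plane PDCP session with 12-bit sequence numbers that would be
 * parsed by the function above:
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.bearer = 1,
 *			.hfn = 0,
 *			.hfn_threshold = 0xa5a5,
 *		},
 *		.crypto_xform = &cipher_xform,	// e.g. SNOW3G_UEA2 or AES_CTR
 *	};
 */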

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
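
/*
 * Illustrative call flow: a conf like the ones sketched above reaches
 * dpaa_sec_security_session_create() through the rte_security API. The
 * exact rte_security_session_create() prototype varies across DPDK
 * releases (a single session mempool vs. an extra private-data pool), so
 * this assumes the single-mempool variant:
 *
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *			rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *			rte_security_session_create(ctx, &conf, sess_pool);
 */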

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
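
/*
 * Illustrative usage (standard cryptodev API): applications read these
 * values through rte_cryptodev_info_get():
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	// info.max_nb_queue_pairs, info.capabilities, ... are now valid
 */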

static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
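	/* Each DQRR entry is 64 B and the ring holds 16 entries (standard
	 * QMan DQRR geometry), so bits [9:6] of the entry address identify
	 * its ring slot; that slot index is held per lcore until the
	 * atomic event is released.
	 */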
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}

int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
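
/*
 * Illustrative sketch (assumption: this hook is normally driven by the
 * DPAA event-device glue rather than called directly): binding SEC queue
 * pair 0 of dev_id to an event queue served on QMan channel ch_id with
 * parallel scheduling would look roughly like:
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_PARALLEL,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	ret = dpaa_sec_eventq_attach(rte_cryptodev_pmd_get_dev(dev_id),
 *				     0, ch_id, &ev);
 */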

int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);
	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
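
/*
 * Illustrative note (the "dpaa_sec-N" naming is an assumption based on the
 * DPAA bus convention): once the bus has probed the SEC block, an
 * application can look the device up by name and query it:
 *
 *	int dev_id = rte_cryptodev_get_dev_id("dpaa_sec-1");
 *
 *	if (dev_id >= 0)
 *		rte_cryptodev_info_get(dev_id, &info);
 */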