/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

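/* Allocate a per-op context from the queue pair's context mempool and
 * clear only the SG entries this op will use.
 */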
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

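/* QMan enqueue-rejection (ERN) callback for a SEC frame queue: log the
 * rejection code and sequence number of the dropped frame.
 */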
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		struct qman_fq *fq,
		const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the input queue with the CAAM channel as destination, so that
 * all packets enqueued on this queue are dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on the in FQ; CAAM puts the crypto result on the
 * out FQ, from which this DQRR callback collects them.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* CAAM puts its processing result into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	struct qm_mcc_initfq opts;
	uint32_t flags;
	int ret = -1;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
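/* Build the PDCP shared descriptor (CDB) for a session: keys are placed
 * inline in the descriptor when rta_inline_query() says they fit, and are
 * referenced by physical address otherwise.
 */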
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	authdata.algtype = ses->auth_key.alg;
	authdata.algmode = ses->auth_key.algmode;

	p_authdata = &authdata;

	cdb->sh_desc[1] = authdata.keylen;

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers and set the QM_VDQCR_EXACT flag.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag; not setting it
	 * can provide up to two more buffers than requested, so we request
	 * two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
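	/* e.g. nb_ops = 3: request exactly 3 frames (QM_VDQCR_EXACT).
	 * nb_ops = 32: request 30 without the flag; QMan may deliver up to
	 * two extra frames, i.e. at most the 32 that were asked for.
	 */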
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

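/* Build a compound frame for an auth-only op on a scattered (multi-segment)
 * mbuf: sg[0] carries the digest output, sg[1] heads the extended input list
 * (optional IV, data segments and, for verification, the expected digest).
 */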
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	/* 2nd seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 *	    |<----data_len------->|
 *	    |ip_header|ah_header|icv|payload|
 */

static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	/* 2nd seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

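/* Scatter-gather variant of a cipher-only op: sg[0] heads the output list,
 * sg[1] heads the input list, whose first entry is the IV.
 */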
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	mbuf = sym->m_src;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	/* IV */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

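/* Scatter-gather variant of an AEAD (GCM) op. Input is IV + optional AAD +
 * payload (+ expected digest on decrypt); output is payload (+ generated
 * digest on encrypt).
 */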
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
			  dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

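/* Scatter-gather variant of a chained cipher+auth op. Input is IV + auth
 * region (+ expected digest on decrypt); output is the auth region
 * (+ generated digest on encrypt).
 */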
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

#ifdef RTE_LIBRTE_SECURITY
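/* Build a compound frame for protocol (lookaside IPsec/PDCP) offload on a
 * contiguous mbuf: the whole packet is handed to SEC, which performs the
 * encap/decap in hardware.
 */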
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
			    ((op->sym->m_dst == NULL) ||
			     rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

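/* Dequeue up to nb_ops completed ops from the queue pair's out FQ. */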
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
			  __rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
		 dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
						  CTX_POOL_NUM_BUFS,
						  CTX_POOL_BUF_SIZE,
						  CTX_POOL_CACHE_SIZE, 0,
						  NULL, NULL, NULL, NULL,
						  SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			      dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

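/* Parse a cipher-only xform into the session: copy the key and map the
 * algorithm to the SEC ALGSEL/AAI values used when building descriptors.
 */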
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

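/* Reserve a free Rx (input) frame queue from the device pool; each session
 * takes one inq per core (MAX_DPAA_CORES in total).
 */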
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_WARN("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}

static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}

static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
				     sess_private_data);

	return 0;
}

static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	free_session_data(s);
	rte_mempool_put(sess_mp, (void *)s);
}

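/* Typical application flow against this PMD (sketch only, not part of the
 * driver; uses the standard DPDK symmetric-session API of this era):
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_priv_mp);
 *	... rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() ...
 *	rte_cryptodev_sym_session_clear(dev_id, s);
 *	rte_cryptodev_sym_session_free(s);
 */
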
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}

#ifdef RTE_LIBRTE_SECURITY
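/* Map an AEAD xform onto the IPsec protocol descriptor: the
 * OP_PCL_IPSEC_AES_GCM* variant is chosen from the digest length and the
 * salt is stored in the encap/decap PDB.
 */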
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			 struct rte_security_ipsec_xform *ipsec_xform,
			 dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
			       (uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
			       (uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}

static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
			  struct rte_crypto_auth_xform *auth_xform,
			  struct rte_security_ipsec_xform *ipsec_xform,
			  dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
		    cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
		       cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

2595 switch (session->auth_alg) {
2596 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2597 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2598 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2600 case RTE_CRYPTO_AUTH_MD5_HMAC:
2601 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2602 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2604 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2605 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2606 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2607 if (session->digest_length != 16)
2609 "+++Using sha256-hmac truncated len is non-standard,"
2610 "it will not work with lookaside proto");
2612 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2613 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2614 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2616 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2617 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2618 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2620 case RTE_CRYPTO_AUTH_AES_CMAC:
2621 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2623 case RTE_CRYPTO_AUTH_NULL:
2624 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2626 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2627 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2628 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2629 case RTE_CRYPTO_AUTH_SHA1:
2630 case RTE_CRYPTO_AUTH_SHA256:
2631 case RTE_CRYPTO_AUTH_SHA512:
2632 case RTE_CRYPTO_AUTH_SHA224:
2633 case RTE_CRYPTO_AUTH_SHA384:
2634 case RTE_CRYPTO_AUTH_MD5:
2635 case RTE_CRYPTO_AUTH_AES_GMAC:
2636 case RTE_CRYPTO_AUTH_KASUMI_F9:
2637 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2638 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2639 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2643 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2648 switch (session->cipher_alg) {
2649 case RTE_CRYPTO_CIPHER_AES_CBC:
2650 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2651 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2653 case RTE_CRYPTO_CIPHER_3DES_CBC:
2654 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2655 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2657 case RTE_CRYPTO_CIPHER_AES_CTR:
2658 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2659 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2660 if (session->dir == DIR_ENC) {
2661 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2662 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2664 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2665 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2668 case RTE_CRYPTO_CIPHER_NULL:
2669 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2671 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2672 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2673 case RTE_CRYPTO_CIPHER_3DES_ECB:
2674 case RTE_CRYPTO_CIPHER_AES_ECB:
2675 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2676 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2677 session->cipher_alg);
2680 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2681 session->cipher_alg);
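
/*
 * Build a lookaside IPsec session from an rte_security session config.
 * For reference, a caller reaches this path roughly as follows
 * (a minimal sketch with illustrative values, not part of this driver):
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		},
 *		.crypto_xform = &cipher_auth_chain,
 *	};
 *	sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 * For an egress tunnel SA the outer IP header is prebuilt here and
 * inlined in the encap PDB so the hardware can prepend it per packet.
 */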
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
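
/*
 * Build a PDCP (control- or user-plane) session. Besides the ciphering
 * and integrity keys, the PDCP shared descriptor needs the bearer,
 * packet direction, SN size and HFN parameters from the pdcp xform.
 */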
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				session->cipher_alg);
			return -1;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				session->auth_alg);
			rte_free(session->cipher_key.data);
			return -1;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be NULL for an auth-only session */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
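
/*
 * rte_security entry point: take session private data from the caller's
 * mempool and dispatch to the protocol-specific setup routine above.
 */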
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
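
/*
 * Eventdev dequeue callbacks. When a SEC out-FQ is attached to an event
 * channel, these run from the QMan portal: they map the completed frame
 * descriptor back to its rte_crypto_op and fill in the rte_event from
 * the template stored in the FQ at attach time.
 */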
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
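
/*
 * Atomic variant: identical FD-to-op handling, but the DQRR entry is
 * held (qman_cb_dqrr_defer) and recorded per lcore, so it is consumed
 * only when the application releases the event.
 */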
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
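
/*
 * Attach/detach a queue pair's out-FQ to an eventdev channel. Atomic
 * scheduling puts the FQ in hold-active mode, which keeps the flow on
 * one portal while its DQRR entry is outstanding.
 */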
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};

	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}

int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
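
/*
 * One-time device init: publish ops and feature flags, then (primary
 * process only) create the security context, the per-queue-pair TX FQs
 * towards CAAM, and the pool of RX FQs that sessions later attach to.
 */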
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
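
/*
 * Bus probe: allocate the cryptodev, derive the SEC era from the device
 * tree when it is not already set, ensure the calling thread has a QMan
 * portal, then run the common device init.
 */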
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);