/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
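/*
 * Note: vtop_offset caches the (virtual address - IOVA) delta of this
 * mempool object, so the fast path can translate pointers that live
 * inside the context with a single subtraction instead of a full
 * address-table lookup.
 */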
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with dest chan as the CAAM channel so that
 * all the packets in this queue get dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
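/*
 * For a SEC-bound FQ, CONTEXT_A carries the IOVA of the shared descriptor
 * that CAAM runs for every frame and CONTEXT_B carries the FQID on which
 * CAAM enqueues the result; that is why dpaa_sec_init_rx() is given both
 * hwdesc and fqid_out.
 */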
/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
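/*
 * The callback above batches completions: once DPAA_SEC_BURST ops sit in
 * the per-lcore array it returns qman_cb_dqrr_defer, holding the remaining
 * DQRR entries until the application drains the batch.
 */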
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		DPAA_SEC_ERR("unable to init caam source fq!");

	return ret;
}
static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (rta_inline_pdcp_query(authdata.algtype,
				  cipherdata.algtype,
				  ses->pdcp.sn_size,
				  ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size,
					ses->pdcp.hfn,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}
	return shared_desc_len;
}
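/*
 * rta_inline_pdcp_query() above reports whether the cipher key still fits
 * immediately inside the PDCP shared descriptor for the given
 * algorithm/SN-size/HFN-override combination; when it does not, the key
 * is passed by reference (RTA_DATA_PTR) using its IOVA instead.
 */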
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
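/*
 * rta_inline_query() writes a bitmask into sh_desc[2]: bit 0 set means the
 * cipher key can stay inline in the shared descriptor, bit 1 the auth key.
 * Keys that do not fit are converted to RTA_DATA_PTR with their IOVA,
 * exactly as done above.
 */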
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
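/*
 * The shared-descriptor header words are stored big-endian because the
 * SEC block parses the CDB in that byte order regardless of the CPU
 * endianness handled by "swap" above.
 */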
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers we ask for the exact count
	 * and set the QM_VDQCR_EXACT flag. Otherwise the flag is left clear;
	 * an inexact VDQCR may return up to two more frames than requested,
	 * so we request two fewer in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
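/*
 * dpaa_sec_deq() relies on a volatile dequeue command (VDQCR): once
 * qman_set_vdq() is armed, the loop keeps pulling DQRR entries until the
 * FQ leaves the VDQCR state. This is also why the qp must only ever be
 * polled by one thread, as noted above.
 */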
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
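/*
 * Digest verification pattern used by both build_auth_only variants: for
 * decode, the expected digest is copied into ctx->digest and appended to
 * the input SG list, so SEC itself compares the computed ICV against it
 * and reports a mismatch through the FD status.
 */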
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
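/*
 * Every build_* helper produces the same two-entry compound frame:
 * cf->sg[0] describes the output buffer(s) and cf->sg[1], flagged
 * extension + final, points at the real input scatter/gather list that
 * starts at cf->sg[2].
 */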
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
						op->sym->session,
						cryptodev_driver_id);
				break;
#ifdef RTE_LIBRTE_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd +=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIBRTE_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
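/*
 * Note on the DCA flags set at the top of the enqueue loop: when an op's
 * source mbuf carries a seqn from the ordered event path, the enqueue is
 * tagged with QMAN_ENQUEUE_FLAG_DCA so the held DQRR entry is consumed
 * on enqueue, releasing packet order back to QMan.
 */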
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
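/*
 * Typical application usage (sketch via the generic cryptodev API; the
 * dev_id/qp_id values are illustrative):
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    deq_ops + done,
 *						    sent - done);
 */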
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
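/*
 * The per-qp ctx_pool backs dpaa_sec_alloc_ctx() on the data path; it is
 * created once and reused on later setups (the "already created" branch
 * above), presumably so a device reconfigure does not invalidate contexts
 * that may still be in flight.
 */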
/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_WARN("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	free_session_data(s);
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			struct rte_security_ipsec_xform *ipsec_xform,
			dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard,"
			"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
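/*
 * Note on the mappings above: the OP_PCL_IPSEC_* identifiers select the
 * RFC-truncated ICV variants (HMAC-SHA1-96, HMAC-MD5-96,
 * HMAC-SHA2-256-128, ...). That is why the SHA256-HMAC case only warns
 * when digest_length differs from 16 bytes: the lookaside-protocol
 * descriptor will still produce a 128-bit ICV regardless of the
 * requested truncation.
 */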
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
						PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
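/*
 * Worked example for the anti-replay selection above: a requested
 * replay_win_sz of 20 is rounded up by rte_align32pow2() to 32 and so
 * still fits the 32-entry window (PDBOPTS_ESP_ARS32); exactly 64
 * selects PDBOPTS_ESP_ARS64; anything larger falls through to the
 * 128-entry window (PDBOPTS_ESP_ARS128).
 */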
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
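/*
 * Note (our reading of the PDCP path, an assumption worth stating): when
 * hfn_ovrd is enabled, the per-packet HFN value is expected inside the
 * crypto op at hfn_ovd_offset, and the cipher xform IV offset is reused
 * to carry it; PDCP sessions do not otherwise consume a per-op IV.
 */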
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
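/*
 * The event fields above are filled from the template saved in outq->ev
 * by dpaa_sec_eventq_attach(), so every completed crypto op is injected
 * into the eventdev queue, and with the scheduling type, that the
 * application bound to this queue pair at attach time.
 */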
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
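/*
 * Worked example for the DQRR bookkeeping above: DQRR entries are 64 B
 * apart, so ((uintptr_t)dqrr >> 6) & 15 converts the entry address into
 * its ring index (an entry address ending in 0xC0 yields index 3).
 * Returning qman_cb_dqrr_defer together with the per-lcore HELD bitmask
 * keeps the entry held-active until the application releases the event,
 * which is matched back through impl_opaque = index + 1.
 */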
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
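/*
 * Illustrative usage, not part of the driver: this hook is normally
 * invoked by the DPAA event crypto adapter. A direct call, with
 * hypothetical queue-pair and channel ids, would look like:
 *
 *	struct rte_event ev = {
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.queue_id = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	ret = dpaa_sec_eventq_attach(cryptodev, 0, ch_id, &ev);
 *
 * RTE_SCHED_TYPE_ORDERED is rejected as unsupported, as above.
 */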
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);