1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
42 #include <rte_dpaa_bus.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
48 static uint8_t cryptodev_driver_id;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
67 struct dpaa_sec_op_ctx *ctx;
70 retval = rte_mempool_get(
71 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
74 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78 * Clear SG memory. There are 16 SG entries of 16 bytes each.
79 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
80 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
81 * each packet, and memset() would be costlier than dcbz_64().
83 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84 dcbz_64(&ctx->job.sg[i]);
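/*
 * For illustration, a sketch of the roughly equivalent plain-C clear
 * (the driver keeps the cacheline variant above because this is a
 * per-packet path): each struct qm_sg_entry is 16 bytes, so one
 * dcbz_64() zeroes one 64-byte cacheline, i.e. four SG entries, which
 * is why the loop strides by 4.
 *
 *	memset(&ctx->job.sg[0], 0,
 *	       RTE_MIN(sg_count, MAX_JOB_SG_ENTRIES) *
 *	       sizeof(struct qm_sg_entry));
 */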
86 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
95 const struct qm_mr_entry *msg)
97 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98 fq->fqid, msg->ern.rc, msg->ern.seqnum);
101 /* Initialize the queue with the CAAM channel as destination, so
102  * that all packets placed on this queue are dispatched to CAAM
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
108 struct qm_mcc_initfq fq_opts;
112 /* Clear FQ options */
113 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
115 flags = QMAN_INITFQ_FLAG_SCHED;
116 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117 QM_INITFQ_WE_CONTEXTB;
119 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120 fq_opts.fqd.context_b = fqid_out;
121 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122 fq_opts.fqd.dest.wq = 0;
124 fq_in->cb.ern = ern_sec_fq_handler;
126 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
128 ret = qman_init_fq(fq_in, flags, &fq_opts);
129 if (unlikely(ret != 0))
130 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
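/*
 * Typical usage, mirroring dpaa_sec_attach_sess_q() further below (a
 * sketch, not additional driver logic): hwdesc is the IOVA of the
 * session CDB and the output FQ belongs to the queue pair.
 *
 *	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
 *			       rte_dpaa_mem_vtop(&sess->cdb),
 *			       qman_fq_fqid(&qp->outq));
 */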
135 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138 struct qman_fq *fq __always_unused,
139 const struct qm_dqrr_entry *dqrr)
141 const struct qm_fd *fd;
142 struct dpaa_sec_job *job;
143 struct dpaa_sec_op_ctx *ctx;
145 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146 return qman_cb_dqrr_defer;
148 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149 return qman_cb_dqrr_consume;
152 /* sg is embedded in an op ctx,
153 * sg[0] is for output
156 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
158 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159 ctx->fd_status = fd->status;
160 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161 struct qm_sg_entry *sg_out;
163 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164 ctx->op->sym->m_src : ctx->op->sym->m_dst;
166 sg_out = &job->sg[0];
167 hw_sg_to_cpu(sg_out);
168 len = sg_out->length;
170 while (mbuf->next != NULL) {
171 len -= mbuf->data_len;
174 mbuf->data_len = len;
176 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177 dpaa_sec_op_ending(ctx);
179 return qman_cb_dqrr_consume;
182 /* CAAM results are delivered to this queue */
184 dpaa_sec_init_tx(struct qman_fq *fq)
187 struct qm_mcc_initfq opts;
190 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191 QMAN_FQ_FLAG_DYNAMIC_FQID;
193 ret = qman_create_fq(0, flags, fq);
195 DPAA_SEC_ERR("qman_create_fq failed");
199 memset(&opts, 0, sizeof(opts));
200 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
203 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
205 fq->cb.dqrr = dqrr_out_fq_cb_rx;
206 fq->cb.ern = ern_sec_fq_handler;
208 ret = qman_init_fq(fq, 0, &opts);
210 DPAA_SEC_ERR("unable to init caam source fq!");
217 static inline int is_aead(dpaa_sec_session *ses)
219 return ((ses->cipher_alg == 0) &&
220 (ses->auth_alg == 0) &&
221 (ses->aead_alg != 0));
224 static inline int is_encode(dpaa_sec_session *ses)
226 return ses->dir == DIR_ENC;
229 static inline int is_decode(dpaa_sec_session *ses)
231 return ses->dir == DIR_DEC;
234 #ifdef RTE_LIB_SECURITY
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
238 struct alginfo authdata = {0}, cipherdata = {0};
239 struct sec_cdb *cdb = &ses->cdb;
240 struct alginfo *p_authdata = NULL;
241 int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
248 cipherdata.key = (size_t)ses->cipher_key.data;
249 cipherdata.keylen = ses->cipher_key.length;
250 cipherdata.key_enc_flags = 0;
251 cipherdata.key_type = RTA_DATA_IMM;
252 cipherdata.algtype = ses->cipher_key.alg;
253 cipherdata.algmode = ses->cipher_key.algmode;
256 authdata.key = (size_t)ses->auth_key.data;
257 authdata.keylen = ses->auth_key.length;
258 authdata.key_enc_flags = 0;
259 authdata.key_type = RTA_DATA_IMM;
260 authdata.algtype = ses->auth_key.alg;
261 authdata.algmode = ses->auth_key.algmode;
263 p_authdata = &authdata;
266 if (rta_inline_pdcp_query(authdata.algtype,
269 ses->pdcp.hfn_ovd)) {
271 (size_t)rte_dpaa_mem_vtop((void *)
272 (size_t)cipherdata.key);
273 cipherdata.key_type = RTA_DATA_PTR;
276 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
277 if (ses->dir == DIR_ENC)
278 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
279 cdb->sh_desc, 1, swap,
284 ses->pdcp.hfn_threshold,
285 &cipherdata, &authdata,
287 else if (ses->dir == DIR_DEC)
288 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
289 cdb->sh_desc, 1, swap,
294 ses->pdcp.hfn_threshold,
295 &cipherdata, &authdata,
298 if (ses->dir == DIR_ENC) {
299 if (ses->pdcp.sdap_enabled)
301 cnstr_shdsc_pdcp_sdap_u_plane_encap(
302 cdb->sh_desc, 1, swap,
307 ses->pdcp.hfn_threshold,
308 &cipherdata, p_authdata, 0);
311 cnstr_shdsc_pdcp_u_plane_encap(
312 cdb->sh_desc, 1, swap,
317 ses->pdcp.hfn_threshold,
318 &cipherdata, p_authdata, 0);
319 } else if (ses->dir == DIR_DEC) {
320 if (ses->pdcp.sdap_enabled)
322 cnstr_shdsc_pdcp_sdap_u_plane_decap(
323 cdb->sh_desc, 1, swap,
328 ses->pdcp.hfn_threshold,
329 &cipherdata, p_authdata, 0);
332 cnstr_shdsc_pdcp_u_plane_decap(
333 cdb->sh_desc, 1, swap,
338 ses->pdcp.hfn_threshold,
339 &cipherdata, p_authdata, 0);
342 return shared_desc_len;
345 /* prepare ipsec proto command block of the session */
347 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
349 struct alginfo cipherdata = {0}, authdata = {0};
350 struct sec_cdb *cdb = &ses->cdb;
351 int32_t shared_desc_len = 0;
353 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
359 cipherdata.key = (size_t)ses->cipher_key.data;
360 cipherdata.keylen = ses->cipher_key.length;
361 cipherdata.key_enc_flags = 0;
362 cipherdata.key_type = RTA_DATA_IMM;
363 cipherdata.algtype = ses->cipher_key.alg;
364 cipherdata.algmode = ses->cipher_key.algmode;
366 if (ses->auth_key.length) {
367 authdata.key = (size_t)ses->auth_key.data;
368 authdata.keylen = ses->auth_key.length;
369 authdata.key_enc_flags = 0;
370 authdata.key_type = RTA_DATA_IMM;
371 authdata.algtype = ses->auth_key.alg;
372 authdata.algmode = ses->auth_key.algmode;
375 cdb->sh_desc[0] = cipherdata.keylen;
376 cdb->sh_desc[1] = authdata.keylen;
377 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
379 (unsigned int *)cdb->sh_desc,
380 &cdb->sh_desc[2], 2);
383 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
386 if (cdb->sh_desc[2] & 1)
387 cipherdata.key_type = RTA_DATA_IMM;
389 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
390 (void *)(size_t)cipherdata.key);
391 cipherdata.key_type = RTA_DATA_PTR;
393 if (cdb->sh_desc[2] & (1<<1))
394 authdata.key_type = RTA_DATA_IMM;
396 authdata.key = (size_t)rte_dpaa_mem_vtop(
397 (void *)(size_t)authdata.key);
398 authdata.key_type = RTA_DATA_PTR;
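/*
 * Sketch of the rta_inline_query() contract as used here: bit 0 of the
 * result word corresponds to the first length passed in (the cipher
 * key) and bit 1 to the second (the auth key). A set bit means the key
 * still fits inline in the shared descriptor (RTA_DATA_IMM); a clear
 * bit means it must be referenced by physical address (RTA_DATA_PTR).
 */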
404 if (ses->dir == DIR_ENC) {
405 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
407 true, swap, SHR_SERIAL,
409 (uint8_t *)&ses->ip4_hdr,
410 &cipherdata, &authdata);
411 } else if (ses->dir == DIR_DEC) {
412 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
414 true, swap, SHR_SERIAL,
416 &cipherdata, &authdata);
418 return shared_desc_len;
421 /* prepare command block of the session */
423 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
425 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
426 int32_t shared_desc_len = 0;
427 struct sec_cdb *cdb = &ses->cdb;
429 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
435 memset(cdb, 0, sizeof(struct sec_cdb));
438 #ifdef RTE_LIB_SECURITY
440 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
443 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
446 case DPAA_SEC_CIPHER:
447 alginfo_c.key = (size_t)ses->cipher_key.data;
448 alginfo_c.keylen = ses->cipher_key.length;
449 alginfo_c.key_enc_flags = 0;
450 alginfo_c.key_type = RTA_DATA_IMM;
451 alginfo_c.algtype = ses->cipher_key.alg;
452 alginfo_c.algmode = ses->cipher_key.algmode;
454 switch (ses->cipher_alg) {
455 case RTE_CRYPTO_CIPHER_AES_CBC:
456 case RTE_CRYPTO_CIPHER_3DES_CBC:
457 case RTE_CRYPTO_CIPHER_DES_CBC:
458 case RTE_CRYPTO_CIPHER_AES_CTR:
459 case RTE_CRYPTO_CIPHER_3DES_CTR:
460 shared_desc_len = cnstr_shdsc_blkcipher(
462 swap, SHR_NEVER, &alginfo_c,
466 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
467 shared_desc_len = cnstr_shdsc_snow_f8(
468 cdb->sh_desc, true, swap,
472 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
473 shared_desc_len = cnstr_shdsc_zuce(
474 cdb->sh_desc, true, swap,
479 DPAA_SEC_ERR("unsupported cipher alg %d",
485 alginfo_a.key = (size_t)ses->auth_key.data;
486 alginfo_a.keylen = ses->auth_key.length;
487 alginfo_a.key_enc_flags = 0;
488 alginfo_a.key_type = RTA_DATA_IMM;
489 alginfo_a.algtype = ses->auth_key.alg;
490 alginfo_a.algmode = ses->auth_key.algmode;
491 switch (ses->auth_alg) {
492 case RTE_CRYPTO_AUTH_MD5_HMAC:
493 case RTE_CRYPTO_AUTH_SHA1_HMAC:
494 case RTE_CRYPTO_AUTH_SHA224_HMAC:
495 case RTE_CRYPTO_AUTH_SHA256_HMAC:
496 case RTE_CRYPTO_AUTH_SHA384_HMAC:
497 case RTE_CRYPTO_AUTH_SHA512_HMAC:
498 shared_desc_len = cnstr_shdsc_hmac(
500 swap, SHR_NEVER, &alginfo_a,
504 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
505 shared_desc_len = cnstr_shdsc_snow_f9(
506 cdb->sh_desc, true, swap,
511 case RTE_CRYPTO_AUTH_ZUC_EIA3:
512 shared_desc_len = cnstr_shdsc_zuca(
513 cdb->sh_desc, true, swap,
519 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
523 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
524 DPAA_SEC_ERR("unsupported AEAD alg");
527 alginfo.key = (size_t)ses->aead_key.data;
528 alginfo.keylen = ses->aead_key.length;
529 alginfo.key_enc_flags = 0;
530 alginfo.key_type = RTA_DATA_IMM;
531 alginfo.algtype = ses->aead_key.alg;
532 alginfo.algmode = ses->aead_key.algmode;
534 if (ses->dir == DIR_ENC)
535 shared_desc_len = cnstr_shdsc_gcm_encap(
536 cdb->sh_desc, true, swap, SHR_NEVER,
541 shared_desc_len = cnstr_shdsc_gcm_decap(
542 cdb->sh_desc, true, swap, SHR_NEVER,
547 case DPAA_SEC_CIPHER_HASH:
548 alginfo_c.key = (size_t)ses->cipher_key.data;
549 alginfo_c.keylen = ses->cipher_key.length;
550 alginfo_c.key_enc_flags = 0;
551 alginfo_c.key_type = RTA_DATA_IMM;
552 alginfo_c.algtype = ses->cipher_key.alg;
553 alginfo_c.algmode = ses->cipher_key.algmode;
555 alginfo_a.key = (size_t)ses->auth_key.data;
556 alginfo_a.keylen = ses->auth_key.length;
557 alginfo_a.key_enc_flags = 0;
558 alginfo_a.key_type = RTA_DATA_IMM;
559 alginfo_a.algtype = ses->auth_key.alg;
560 alginfo_a.algmode = ses->auth_key.algmode;
562 cdb->sh_desc[0] = alginfo_c.keylen;
563 cdb->sh_desc[1] = alginfo_a.keylen;
564 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
566 (unsigned int *)cdb->sh_desc,
567 &cdb->sh_desc[2], 2);
570 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
573 if (cdb->sh_desc[2] & 1)
574 alginfo_c.key_type = RTA_DATA_IMM;
576 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
577 (void *)(size_t)alginfo_c.key);
578 alginfo_c.key_type = RTA_DATA_PTR;
580 if (cdb->sh_desc[2] & (1<<1))
581 alginfo_a.key_type = RTA_DATA_IMM;
583 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
584 (void *)(size_t)alginfo_a.key);
585 alginfo_a.key_type = RTA_DATA_PTR;
590 /* Auth_only_len is set to 0 here; it will be
591  * overwritten in the FD for each packet.
593 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
594 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
596 ses->digest_length, ses->dir);
598 case DPAA_SEC_HASH_CIPHER:
600 DPAA_SEC_ERR("error: Unsupported session");
604 if (shared_desc_len < 0) {
605 DPAA_SEC_ERR("error in preparing command block");
606 return shared_desc_len;
609 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
610 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
611 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
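/*
 * Note: idlen carries the shared-descriptor length in 4-byte words, and
 * the header words are converted to big-endian, presumably the byte
 * order the SEC block expects regardless of core endianness.
 */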
616 /* qp is lockless; it must be accessed by only one thread */
618 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
621 unsigned int pkts = 0;
622 int num_rx_bufs, ret;
623 struct qm_dqrr_entry *dq;
624 uint32_t vdqcr_flags = 0;
628 * For requests of up to four buffers, we ask for the exact number
629 * and set the QM_VDQCR_EXACT flag.
630 * Otherwise the flag is left unset, in which case QMan may return up
631 * to two more buffers than requested, so we ask for two fewer.
634 vdqcr_flags = QM_VDQCR_EXACT;
635 num_rx_bufs = nb_ops;
637 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
638 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
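/*
 * Worked example of the logic above (assuming nb_ops does not exceed
 * DPAA_MAX_DEQUEUE_NUM_FRAMES): nb_ops = 3 gives num_rx_bufs = 3 with
 * QM_VDQCR_EXACT set; nb_ops = 10 gives num_rx_bufs = 8 with the flag
 * clear, so QMan may hand back anywhere up to 10 frames.
 */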
640 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
645 const struct qm_fd *fd;
646 struct dpaa_sec_job *job;
647 struct dpaa_sec_op_ctx *ctx;
648 struct rte_crypto_op *op;
650 dq = qman_dequeue(fq);
655 /* sg is embedded in an op ctx,
656 * sg[0] is for output
659 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
661 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
662 ctx->fd_status = fd->status;
664 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
665 struct qm_sg_entry *sg_out;
667 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
668 op->sym->m_src : op->sym->m_dst;
670 sg_out = &job->sg[0];
671 hw_sg_to_cpu(sg_out);
672 len = sg_out->length;
674 while (mbuf->next != NULL) {
675 len -= mbuf->data_len;
678 mbuf->data_len = len;
680 if (!ctx->fd_status) {
681 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
683 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
684 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
688 /* report op status to sym->op and then free the ctx memory */
689 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
691 qman_dqrr_consume(fq, dq);
692 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
697 static inline struct dpaa_sec_job *
698 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
700 struct rte_crypto_sym_op *sym = op->sym;
701 struct rte_mbuf *mbuf = sym->m_src;
702 struct dpaa_sec_job *cf;
703 struct dpaa_sec_op_ctx *ctx;
704 struct qm_sg_entry *sg, *out_sg, *in_sg;
705 phys_addr_t start_addr;
706 uint8_t *old_digest, extra_segs;
707 int data_len, data_offset;
709 data_len = sym->auth.data.length;
710 data_offset = sym->auth.data.offset;
712 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
713 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
714 if ((data_len & 7) || (data_offset & 7)) {
715 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
719 data_len = data_len >> 3;
720 data_offset = data_offset >> 3;
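/*
 * Example: for SNOW3G/ZUC the crypto API expresses auth length and
 * offset in bits, so sym->auth.data.length = 128 (bits) becomes
 * data_len = 16 bytes after the shifts above.
 */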
728 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
729 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
733 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
739 old_digest = ctx->digest;
743 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
744 out_sg->length = ses->digest_length;
745 cpu_to_hw_sg(out_sg);
749 /* need to extend the input to a compound frame */
750 in_sg->extension = 1;
752 in_sg->length = data_len;
753 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
758 if (ses->iv.length) {
761 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
764 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
765 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
767 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
768 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
771 sg->length = ses->iv.length;
773 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
774 in_sg->length += sg->length;
779 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
780 sg->offset = data_offset;
782 if (data_len <= (mbuf->data_len - data_offset)) {
783 sg->length = data_len;
785 sg->length = mbuf->data_len - data_offset;
787 /* remaining i/p segs */
788 while ((data_len = data_len - sg->length) &&
789 (mbuf = mbuf->next)) {
792 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
793 if (data_len > mbuf->data_len)
794 sg->length = mbuf->data_len;
796 sg->length = data_len;
800 if (is_decode(ses)) {
801 /* Digest verification case */
804 rte_memcpy(old_digest, sym->auth.digest.data,
806 start_addr = rte_dpaa_mem_vtop(old_digest);
807 qm_sg_entry_set64(sg, start_addr);
808 sg->length = ses->digest_length;
809 in_sg->length += ses->digest_length;
820 * |<----data_len------->|
821 * |ip_header|ah_header|icv|payload|
826 static inline struct dpaa_sec_job *
827 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
829 struct rte_crypto_sym_op *sym = op->sym;
830 struct rte_mbuf *mbuf = sym->m_src;
831 struct dpaa_sec_job *cf;
832 struct dpaa_sec_op_ctx *ctx;
833 struct qm_sg_entry *sg, *in_sg;
834 rte_iova_t start_addr;
836 int data_len, data_offset;
838 data_len = sym->auth.data.length;
839 data_offset = sym->auth.data.offset;
841 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
842 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
843 if ((data_len & 7) || (data_offset & 7)) {
844 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
848 data_len = data_len >> 3;
849 data_offset = data_offset >> 3;
852 ctx = dpaa_sec_alloc_ctx(ses, 4);
858 old_digest = ctx->digest;
860 start_addr = rte_pktmbuf_iova(mbuf);
863 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
864 sg->length = ses->digest_length;
869 /* need to extend the input to a compound frame */
870 in_sg->extension = 1;
872 in_sg->length = data_len;
873 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
876 if (ses->iv.length) {
879 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
882 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
883 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
885 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
886 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
889 sg->length = ses->iv.length;
891 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
892 in_sg->length += sg->length;
897 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
898 sg->offset = data_offset;
899 sg->length = data_len;
901 if (is_decode(ses)) {
902 /* Digest verification case */
904 /* hash result or digest, save digest first */
905 rte_memcpy(old_digest, sym->auth.digest.data,
907 /* let's check digest by hw */
908 start_addr = rte_dpaa_mem_vtop(old_digest);
910 qm_sg_entry_set64(sg, start_addr);
911 sg->length = ses->digest_length;
912 in_sg->length += ses->digest_length;
921 static inline struct dpaa_sec_job *
922 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
924 struct rte_crypto_sym_op *sym = op->sym;
925 struct dpaa_sec_job *cf;
926 struct dpaa_sec_op_ctx *ctx;
927 struct qm_sg_entry *sg, *out_sg, *in_sg;
928 struct rte_mbuf *mbuf;
930 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
932 int data_len, data_offset;
934 data_len = sym->cipher.data.length;
935 data_offset = sym->cipher.data.offset;
937 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
938 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
939 if ((data_len & 7) || (data_offset & 7)) {
940 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
944 data_len = data_len >> 3;
945 data_offset = data_offset >> 3;
950 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
953 req_segs = mbuf->nb_segs * 2 + 3;
955 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
956 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
961 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
970 out_sg->extension = 1;
971 out_sg->length = data_len;
972 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
973 cpu_to_hw_sg(out_sg);
977 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
978 sg->length = mbuf->data_len - data_offset;
979 sg->offset = data_offset;
981 /* Successive segs */
986 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
987 sg->length = mbuf->data_len;
996 in_sg->extension = 1;
998 in_sg->length = data_len + ses->iv.length;
1001 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1002 cpu_to_hw_sg(in_sg);
1005 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1006 sg->length = ses->iv.length;
1011 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1012 sg->length = mbuf->data_len - data_offset;
1013 sg->offset = data_offset;
1015 /* Successive segs */
1020 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1021 sg->length = mbuf->data_len;
1030 static inline struct dpaa_sec_job *
1031 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1033 struct rte_crypto_sym_op *sym = op->sym;
1034 struct dpaa_sec_job *cf;
1035 struct dpaa_sec_op_ctx *ctx;
1036 struct qm_sg_entry *sg;
1037 rte_iova_t src_start_addr, dst_start_addr;
1038 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1040 int data_len, data_offset;
1042 data_len = sym->cipher.data.length;
1043 data_offset = sym->cipher.data.offset;
1045 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1046 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1047 if ((data_len & 7) || (data_offset & 7)) {
1048 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1052 data_len = data_len >> 3;
1053 data_offset = data_offset >> 3;
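/*
 * Four SG entries suffice here; a sketch of the layout built below:
 * sg[0] output, sg[1] input (extension entry), sg[2] IV, sg[3] payload.
 */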
1056 ctx = dpaa_sec_alloc_ctx(ses, 4);
1063 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1066 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1068 dst_start_addr = src_start_addr;
1072 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1073 sg->length = data_len + ses->iv.length;
1079 /* need to extend the input to a compound frame */
1082 sg->length = data_len + ses->iv.length;
1083 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1087 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1088 sg->length = ses->iv.length;
1092 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1093 sg->length = data_len;
1100 static inline struct dpaa_sec_job *
1101 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1103 struct rte_crypto_sym_op *sym = op->sym;
1104 struct dpaa_sec_job *cf;
1105 struct dpaa_sec_op_ctx *ctx;
1106 struct qm_sg_entry *sg, *out_sg, *in_sg;
1107 struct rte_mbuf *mbuf;
1109 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1114 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1117 req_segs = mbuf->nb_segs * 2 + 4;
1120 if (ses->auth_only_len)
1123 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1124 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1129 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1136 rte_prefetch0(cf->sg);
1139 out_sg = &cf->sg[0];
1140 out_sg->extension = 1;
1142 out_sg->length = sym->aead.data.length + ses->digest_length;
1144 out_sg->length = sym->aead.data.length;
1146 /* output sg entries */
1148 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1149 cpu_to_hw_sg(out_sg);
1152 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1153 sg->length = mbuf->data_len - sym->aead.data.offset;
1154 sg->offset = sym->aead.data.offset;
1156 /* Successive segs */
1161 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1162 sg->length = mbuf->data_len;
1165 sg->length -= ses->digest_length;
1167 if (is_encode(ses)) {
1169 /* set auth output */
1171 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1172 sg->length = ses->digest_length;
1180 in_sg->extension = 1;
1183 in_sg->length = ses->iv.length + sym->aead.data.length
1184 + ses->auth_only_len;
1186 in_sg->length = ses->iv.length + sym->aead.data.length
1187 + ses->auth_only_len + ses->digest_length;
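/*
 * Length bookkeeping sketch: on encrypt the input is
 * IV || AAD || plaintext and the digest is produced into the output; on
 * decrypt the received digest is appended to the input instead so that
 * SEC can verify it, hence the extra ses->digest_length here.
 */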
1189 /* input sg entries */
1191 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1192 cpu_to_hw_sg(in_sg);
1195 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1196 sg->length = ses->iv.length;
1199 /* 2nd seg auth only */
1200 if (ses->auth_only_len) {
1202 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1203 sg->length = ses->auth_only_len;
1209 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1210 sg->length = mbuf->data_len - sym->aead.data.offset;
1211 sg->offset = sym->aead.data.offset;
1213 /* Successive segs */
1218 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1219 sg->length = mbuf->data_len;
1223 if (is_decode(ses)) {
1226 memcpy(ctx->digest, sym->aead.digest.data,
1227 ses->digest_length);
1228 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1229 sg->length = ses->digest_length;
1237 static inline struct dpaa_sec_job *
1238 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1240 struct rte_crypto_sym_op *sym = op->sym;
1241 struct dpaa_sec_job *cf;
1242 struct dpaa_sec_op_ctx *ctx;
1243 struct qm_sg_entry *sg;
1244 uint32_t length = 0;
1245 rte_iova_t src_start_addr, dst_start_addr;
1246 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1249 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1252 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1254 dst_start_addr = src_start_addr;
1256 ctx = dpaa_sec_alloc_ctx(ses, 7);
1264 rte_prefetch0(cf->sg);
1266 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1267 if (is_encode(ses)) {
1268 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1269 sg->length = ses->iv.length;
1270 length += sg->length;
1274 if (ses->auth_only_len) {
1275 qm_sg_entry_set64(sg,
1276 rte_dpaa_mem_vtop(sym->aead.aad.data));
1277 sg->length = ses->auth_only_len;
1278 length += sg->length;
1282 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1283 sg->length = sym->aead.data.length;
1284 length += sg->length;
1288 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1289 sg->length = ses->iv.length;
1290 length += sg->length;
1294 if (ses->auth_only_len) {
1295 qm_sg_entry_set64(sg,
1296 rte_dpaa_mem_vtop(sym->aead.aad.data));
1297 sg->length = ses->auth_only_len;
1298 length += sg->length;
1302 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1303 sg->length = sym->aead.data.length;
1304 length += sg->length;
1307 memcpy(ctx->digest, sym->aead.digest.data,
1308 ses->digest_length);
1311 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1312 sg->length = ses->digest_length;
1313 length += sg->length;
1317 /* input compound frame */
1318 cf->sg[1].length = length;
1319 cf->sg[1].extension = 1;
1320 cf->sg[1].final = 1;
1321 cpu_to_hw_sg(&cf->sg[1]);
1325 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1326 qm_sg_entry_set64(sg,
1327 dst_start_addr + sym->aead.data.offset);
1328 sg->length = sym->aead.data.length;
1329 length = sg->length;
1330 if (is_encode(ses)) {
1332 /* set auth output */
1334 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1335 sg->length = ses->digest_length;
1336 length += sg->length;
1341 /* output compound frame */
1342 cf->sg[0].length = length;
1343 cf->sg[0].extension = 1;
1344 cpu_to_hw_sg(&cf->sg[0]);
1349 static inline struct dpaa_sec_job *
1350 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1352 struct rte_crypto_sym_op *sym = op->sym;
1353 struct dpaa_sec_job *cf;
1354 struct dpaa_sec_op_ctx *ctx;
1355 struct qm_sg_entry *sg, *out_sg, *in_sg;
1356 struct rte_mbuf *mbuf;
1358 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1363 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1366 req_segs = mbuf->nb_segs * 2 + 4;
1369 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1370 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1375 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1382 rte_prefetch0(cf->sg);
1385 out_sg = &cf->sg[0];
1386 out_sg->extension = 1;
1388 out_sg->length = sym->auth.data.length + ses->digest_length;
1390 out_sg->length = sym->auth.data.length;
1392 /* output sg entries */
1394 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1395 cpu_to_hw_sg(out_sg);
1398 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1399 sg->length = mbuf->data_len - sym->auth.data.offset;
1400 sg->offset = sym->auth.data.offset;
1402 /* Successive segs */
1407 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1408 sg->length = mbuf->data_len;
1411 sg->length -= ses->digest_length;
1413 if (is_encode(ses)) {
1415 /* set auth output */
1417 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1418 sg->length = ses->digest_length;
1426 in_sg->extension = 1;
1429 in_sg->length = ses->iv.length + sym->auth.data.length;
1431 in_sg->length = ses->iv.length + sym->auth.data.length
1432 + ses->digest_length;
1434 /* input sg entries */
1436 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1437 cpu_to_hw_sg(in_sg);
1440 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1441 sg->length = ses->iv.length;
1446 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1447 sg->length = mbuf->data_len - sym->auth.data.offset;
1448 sg->offset = sym->auth.data.offset;
1450 /* Successive segs */
1455 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1456 sg->length = mbuf->data_len;
1460 sg->length -= ses->digest_length;
1461 if (is_decode(ses)) {
1464 memcpy(ctx->digest, sym->auth.digest.data,
1465 ses->digest_length);
1466 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1467 sg->length = ses->digest_length;
1475 static inline struct dpaa_sec_job *
1476 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1478 struct rte_crypto_sym_op *sym = op->sym;
1479 struct dpaa_sec_job *cf;
1480 struct dpaa_sec_op_ctx *ctx;
1481 struct qm_sg_entry *sg;
1482 rte_iova_t src_start_addr, dst_start_addr;
1483 uint32_t length = 0;
1484 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1487 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1489 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1491 dst_start_addr = src_start_addr;
1493 ctx = dpaa_sec_alloc_ctx(ses, 7);
1501 rte_prefetch0(cf->sg);
1503 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1504 if (is_encode(ses)) {
1505 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1506 sg->length = ses->iv.length;
1507 length += sg->length;
1511 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1512 sg->length = sym->auth.data.length;
1513 length += sg->length;
1517 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1518 sg->length = ses->iv.length;
1519 length += sg->length;
1524 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1525 sg->length = sym->auth.data.length;
1526 length += sg->length;
1529 memcpy(ctx->digest, sym->auth.digest.data,
1530 ses->digest_length);
1533 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1534 sg->length = ses->digest_length;
1535 length += sg->length;
1539 /* input compound frame */
1540 cf->sg[1].length = length;
1541 cf->sg[1].extension = 1;
1542 cf->sg[1].final = 1;
1543 cpu_to_hw_sg(&cf->sg[1]);
1547 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1548 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1549 sg->length = sym->cipher.data.length;
1550 length = sg->length;
1551 if (is_encode(ses)) {
1553 /* set auth output */
1555 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1556 sg->length = ses->digest_length;
1557 length += sg->length;
1562 /* output compound frame */
1563 cf->sg[0].length = length;
1564 cf->sg[0].extension = 1;
1565 cpu_to_hw_sg(&cf->sg[0]);
1570 #ifdef RTE_LIB_SECURITY
1571 static inline struct dpaa_sec_job *
1572 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1574 struct rte_crypto_sym_op *sym = op->sym;
1575 struct dpaa_sec_job *cf;
1576 struct dpaa_sec_op_ctx *ctx;
1577 struct qm_sg_entry *sg;
1578 phys_addr_t src_start_addr, dst_start_addr;
1580 ctx = dpaa_sec_alloc_ctx(ses, 2);
1586 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1589 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1591 dst_start_addr = src_start_addr;
1595 qm_sg_entry_set64(sg, src_start_addr);
1596 sg->length = sym->m_src->pkt_len;
1600 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1603 qm_sg_entry_set64(sg, dst_start_addr);
1604 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
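/*
 * The output entry spans all buffer room past data_off rather than
 * pkt_len, presumably because protocol offload (tunnel header, padding,
 * ICV) grows the packet and the final length is only known once SEC
 * writes the result back.
 */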
1610 static inline struct dpaa_sec_job *
1611 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1613 struct rte_crypto_sym_op *sym = op->sym;
1614 struct dpaa_sec_job *cf;
1615 struct dpaa_sec_op_ctx *ctx;
1616 struct qm_sg_entry *sg, *out_sg, *in_sg;
1617 struct rte_mbuf *mbuf;
1619 uint32_t in_len = 0, out_len = 0;
1626 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1627 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1628 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1633 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1639 out_sg = &cf->sg[0];
1640 out_sg->extension = 1;
1641 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1645 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1648 /* Successive segs */
1649 while (mbuf->next) {
1650 sg->length = mbuf->data_len;
1651 out_len += sg->length;
1655 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1658 sg->length = mbuf->buf_len - mbuf->data_off;
1659 out_len += sg->length;
1663 out_sg->length = out_len;
1664 cpu_to_hw_sg(out_sg);
1669 in_sg->extension = 1;
1671 in_len = mbuf->data_len;
1674 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1677 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1678 sg->length = mbuf->data_len;
1681 /* Successive segs */
1686 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1687 sg->length = mbuf->data_len;
1689 in_len += sg->length;
1695 in_sg->length = in_len;
1696 cpu_to_hw_sg(in_sg);
1698 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1705 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1708 /* Transmit the frames to the given device and queue pair */
1710 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1711 uint16_t num_tx = 0;
1712 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1713 uint32_t frames_to_send;
1714 struct rte_crypto_op *op;
1715 struct dpaa_sec_job *cf;
1716 dpaa_sec_session *ses;
1717 uint16_t auth_hdr_len, auth_tail_len;
1718 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1719 struct qman_fq *inq[DPAA_SEC_BURST];
1721 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1722 if (rte_dpaa_portal_init((void *)0)) {
1723 DPAA_SEC_ERR("Failure in affining portal");
1729 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1730 DPAA_SEC_BURST : nb_ops;
1731 for (loop = 0; loop < frames_to_send; loop++) {
1733 if (*dpaa_seqn(op->sym->m_src) != 0) {
1734 index = *dpaa_seqn(op->sym->m_src) - 1;
1735 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1736 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1737 flags[loop] = ((index & 0x0f) << 8);
1738 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1739 DPAA_PER_LCORE_DQRR_SIZE--;
1740 DPAA_PER_LCORE_DQRR_HELD &=
1745 switch (op->sess_type) {
1746 case RTE_CRYPTO_OP_WITH_SESSION:
1747 ses = (dpaa_sec_session *)
1748 get_sym_session_private_data(
1750 cryptodev_driver_id);
1752 #ifdef RTE_LIB_SECURITY
1753 case RTE_CRYPTO_OP_SECURITY_SESSION:
1754 ses = (dpaa_sec_session *)
1755 get_sec_session_private_data(
1756 op->sym->sec_session);
1761 "sessionless crypto op not supported");
1762 frames_to_send = loop;
1768 DPAA_SEC_DP_ERR("session not available");
1769 frames_to_send = loop;
1774 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1775 if (dpaa_sec_attach_sess_q(qp, ses)) {
1776 frames_to_send = loop;
1780 } else if (unlikely(ses->qp[rte_lcore_id() %
1781 MAX_DPAA_CORES] != qp)) {
1782 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1784 ses->qp[rte_lcore_id() %
1785 MAX_DPAA_CORES], qp);
1786 frames_to_send = loop;
1791 auth_hdr_len = op->sym->auth.data.length -
1792 op->sym->cipher.data.length;
1795 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1796 ((op->sym->m_dst == NULL) ||
1797 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1798 switch (ses->ctxt) {
1799 #ifdef RTE_LIB_SECURITY
1801 case DPAA_SEC_IPSEC:
1802 cf = build_proto(op, ses);
1806 cf = build_auth_only(op, ses);
1808 case DPAA_SEC_CIPHER:
1809 cf = build_cipher_only(op, ses);
1812 cf = build_cipher_auth_gcm(op, ses);
1813 auth_hdr_len = ses->auth_only_len;
1815 case DPAA_SEC_CIPHER_HASH:
1817 op->sym->cipher.data.offset
1818 - op->sym->auth.data.offset;
1820 op->sym->auth.data.length
1821 - op->sym->cipher.data.length
1823 cf = build_cipher_auth(op, ses);
1826 DPAA_SEC_DP_ERR("unsupported op");
1827 frames_to_send = loop;
1832 switch (ses->ctxt) {
1833 #ifdef RTE_LIB_SECURITY
1835 case DPAA_SEC_IPSEC:
1836 cf = build_proto_sg(op, ses);
1840 cf = build_auth_only_sg(op, ses);
1842 case DPAA_SEC_CIPHER:
1843 cf = build_cipher_only_sg(op, ses);
1846 cf = build_cipher_auth_gcm_sg(op, ses);
1847 auth_hdr_len = ses->auth_only_len;
1849 case DPAA_SEC_CIPHER_HASH:
1851 op->sym->cipher.data.offset
1852 - op->sym->auth.data.offset;
1854 op->sym->auth.data.length
1855 - op->sym->cipher.data.length
1857 cf = build_cipher_auth_sg(op, ses);
1860 DPAA_SEC_DP_ERR("unsupported op");
1861 frames_to_send = loop;
1866 if (unlikely(!cf)) {
1867 frames_to_send = loop;
1873 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1874 fd->opaque_addr = 0;
1876 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1877 fd->_format1 = qm_fd_compound;
1878 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1880 /* Auth_only_len is set to 0 in the descriptor and is
1881  * overwritten here in fd.cmd, which will update
1884 if (auth_hdr_len || auth_tail_len) {
1885 fd->cmd = 0x80000000;
1887 ((auth_tail_len << 16) | auth_hdr_len);
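/*
 * Example encoding: auth_tail_len = 4 and auth_hdr_len = 12 yields
 * fd->cmd = 0x80000000 | (4 << 16) | 12 = 0x8004000c.
 */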
1890 #ifdef RTE_LIB_SECURITY
1891 /* In case of PDCP, the per-packet HFN is stored in
1892  * the mbuf private area, after sym_op.
1894 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1895 fd->cmd = 0x80000000 |
1896 *((uint32_t *)((uint8_t *)op +
1897 ses->pdcp.hfn_ovd_offset));
1898 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1899 *((uint32_t *)((uint8_t *)op +
1900 ses->pdcp.hfn_ovd_offset)),
1907 while (loop < frames_to_send) {
1908 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1909 &flags[loop], frames_to_send - loop);
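/*
 * qman_enqueue_multi_fq() may accept fewer frames than requested, so
 * the surrounding loop retries from the first unsent frame until all
 * of frames_to_send are enqueued.
 */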
1911 nb_ops -= frames_to_send;
1912 num_tx += frames_to_send;
1915 dpaa_qp->tx_pkts += num_tx;
1916 dpaa_qp->tx_errs += nb_ops - num_tx;
1922 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1926 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1928 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1929 if (rte_dpaa_portal_init((void *)0)) {
1930 DPAA_SEC_ERR("Failure in affining portal");
1935 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1937 dpaa_qp->rx_pkts += num_rx;
1938 dpaa_qp->rx_errs += nb_ops - num_rx;
1940 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1945 /** Release queue pair */
1947 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1950 struct dpaa_sec_dev_private *internals;
1951 struct dpaa_sec_qp *qp = NULL;
1953 PMD_INIT_FUNC_TRACE();
1955 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1957 internals = dev->data->dev_private;
1958 if (qp_id >= internals->max_nb_queue_pairs) {
1959 DPAA_SEC_ERR("Max supported qpid %d",
1960 internals->max_nb_queue_pairs);
1964 qp = &internals->qps[qp_id];
1965 rte_mempool_free(qp->ctx_pool);
1966 qp->internals = NULL;
1967 dev->data->queue_pairs[qp_id] = NULL;
1972 /** Setup a queue pair */
1974 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1975 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1976 __rte_unused int socket_id)
1978 struct dpaa_sec_dev_private *internals;
1979 struct dpaa_sec_qp *qp = NULL;
1982 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1984 internals = dev->data->dev_private;
1985 if (qp_id >= internals->max_nb_queue_pairs) {
1986 DPAA_SEC_ERR("Max supported qpid %d",
1987 internals->max_nb_queue_pairs);
1991 qp = &internals->qps[qp_id];
1992 qp->internals = internals;
1993 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1994 dev->data->dev_id, qp_id);
1995 if (!qp->ctx_pool) {
1996 qp->ctx_pool = rte_mempool_create((const char *)str,
1999 CTX_POOL_CACHE_SIZE, 0,
2000 NULL, NULL, NULL, NULL,
2002 if (!qp->ctx_pool) {
2003 DPAA_SEC_ERR("%s create failed\n", str);
2007 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2008 dev->data->dev_id, qp_id);
2009 dev->data->queue_pairs[qp_id] = qp;
2014 /** Returns the size of session structure */
2016 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2018 PMD_INIT_FUNC_TRACE();
2020 return sizeof(dpaa_sec_session);
2024 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2025 struct rte_crypto_sym_xform *xform,
2026 dpaa_sec_session *session)
2028 session->ctxt = DPAA_SEC_CIPHER;
2029 session->cipher_alg = xform->cipher.algo;
2030 session->iv.length = xform->cipher.iv.length;
2031 session->iv.offset = xform->cipher.iv.offset;
2032 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2033 RTE_CACHE_LINE_SIZE);
2034 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2035 DPAA_SEC_ERR("No Memory for cipher key");
2038 session->cipher_key.length = xform->cipher.key.length;
2040 memcpy(session->cipher_key.data, xform->cipher.key.data,
2041 xform->cipher.key.length);
2042 switch (xform->cipher.algo) {
2043 case RTE_CRYPTO_CIPHER_AES_CBC:
2044 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2045 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2047 case RTE_CRYPTO_CIPHER_DES_CBC:
2048 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2049 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2051 case RTE_CRYPTO_CIPHER_3DES_CBC:
2052 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2053 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2055 case RTE_CRYPTO_CIPHER_AES_CTR:
2056 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2057 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2059 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2060 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2062 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2063 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2066 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2067 xform->cipher.algo);
2070 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2077 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2078 struct rte_crypto_sym_xform *xform,
2079 dpaa_sec_session *session)
2081 session->ctxt = DPAA_SEC_AUTH;
2082 session->auth_alg = xform->auth.algo;
2083 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2084 RTE_CACHE_LINE_SIZE);
2085 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2086 DPAA_SEC_ERR("No Memory for auth key");
2089 session->auth_key.length = xform->auth.key.length;
2090 session->digest_length = xform->auth.digest_length;
2091 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2092 session->iv.offset = xform->auth.iv.offset;
2093 session->iv.length = xform->auth.iv.length;
2096 memcpy(session->auth_key.data, xform->auth.key.data,
2097 xform->auth.key.length);
2099 switch (xform->auth.algo) {
2100 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2101 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2102 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2104 case RTE_CRYPTO_AUTH_MD5_HMAC:
2105 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2106 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2108 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2109 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2110 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2112 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2113 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2114 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2116 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2117 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2118 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2120 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2121 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2122 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2124 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2125 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2126 session->auth_key.algmode = OP_ALG_AAI_F9;
2128 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2129 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2130 session->auth_key.algmode = OP_ALG_AAI_F9;
2133 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2138 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2145 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2146 struct rte_crypto_sym_xform *xform,
2147 dpaa_sec_session *session)
2150 struct rte_crypto_cipher_xform *cipher_xform;
2151 struct rte_crypto_auth_xform *auth_xform;
2153 session->ctxt = DPAA_SEC_CIPHER_HASH;
2154 if (session->auth_cipher_text) {
2155 cipher_xform = &xform->cipher;
2156 auth_xform = &xform->next->auth;
2158 cipher_xform = &xform->next->cipher;
2159 auth_xform = &xform->auth;
2162 /* Set IV parameters */
2163 session->iv.offset = cipher_xform->iv.offset;
2164 session->iv.length = cipher_xform->iv.length;
2166 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2167 RTE_CACHE_LINE_SIZE);
2168 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2169 DPAA_SEC_ERR("No Memory for cipher key");
2172 session->cipher_key.length = cipher_xform->key.length;
2173 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2174 RTE_CACHE_LINE_SIZE);
2175 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2176 DPAA_SEC_ERR("No Memory for auth key");
2179 session->auth_key.length = auth_xform->key.length;
2180 memcpy(session->cipher_key.data, cipher_xform->key.data,
2181 cipher_xform->key.length);
2182 memcpy(session->auth_key.data, auth_xform->key.data,
2183 auth_xform->key.length);
2185 session->digest_length = auth_xform->digest_length;
2186 session->auth_alg = auth_xform->algo;
2188 switch (auth_xform->algo) {
2189 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2190 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2191 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2193 case RTE_CRYPTO_AUTH_MD5_HMAC:
2194 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2195 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2197 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2198 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2199 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2201 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2202 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2203 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2205 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2206 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2207 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2209 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2210 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2211 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2214 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2219 session->cipher_alg = cipher_xform->algo;
2221 switch (cipher_xform->algo) {
2222 case RTE_CRYPTO_CIPHER_AES_CBC:
2223 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2224 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2226 case RTE_CRYPTO_CIPHER_DES_CBC:
2227 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2228 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2230 case RTE_CRYPTO_CIPHER_3DES_CBC:
2231 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2232 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2234 case RTE_CRYPTO_CIPHER_AES_CTR:
2235 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2236 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2239 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2240 cipher_xform->algo);
2243 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2249 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2250 struct rte_crypto_sym_xform *xform,
2251 dpaa_sec_session *session)
2253 session->aead_alg = xform->aead.algo;
2254 session->ctxt = DPAA_SEC_AEAD;
2255 session->iv.length = xform->aead.iv.length;
2256 session->iv.offset = xform->aead.iv.offset;
2257 session->auth_only_len = xform->aead.aad_length;
2258 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2259 RTE_CACHE_LINE_SIZE);
2260 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2261 DPAA_SEC_ERR("No Memory for aead key");
2264 session->aead_key.length = xform->aead.key.length;
2265 session->digest_length = xform->aead.digest_length;
2267 memcpy(session->aead_key.data, xform->aead.key.data,
2268 xform->aead.key.length);
2270 switch (session->aead_alg) {
2271 case RTE_CRYPTO_AEAD_AES_GCM:
2272 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2273 session->aead_key.algmode = OP_ALG_AAI_GCM;
2276 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2280 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2286 static struct qman_fq *
2287 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2291 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2292 if (qi->inq_attach[i] == 0) {
2293 qi->inq_attach[i] = 1;
2297 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2303 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2307 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2308 if (&qi->inq[i] == fq) {
2309 if (qman_retire_fq(fq, NULL) != 0)
2310 DPAA_SEC_DEBUG("Queue is not retired\n");
2312 qi->inq_attach[i] = 0;
2320 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2324 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2325 ret = dpaa_sec_prep_cdb(sess);
2327 DPAA_SEC_ERR("Unable to prepare sec cdb");
2330 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2331 ret = rte_dpaa_portal_init((void *)0);
2333 DPAA_SEC_ERR("Failure in affining portal");
2337 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2338 rte_dpaa_mem_vtop(&sess->cdb),
2339 qman_fq_fqid(&qp->outq));
2341 DPAA_SEC_ERR("Unable to init sec queue");
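/*
 * From here on, the per-core in-FQ feeds CAAM through the shared
 * descriptor at &sess->cdb, and results for this session come back on
 * the queue pair's outq (see dpaa_sec_init_rx() above).
 */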
2347 free_session_data(dpaa_sec_session *s)
2350 rte_free(s->aead_key.data);
2352 rte_free(s->auth_key.data);
2353 rte_free(s->cipher_key.data);
2355 memset(s, 0, sizeof(dpaa_sec_session));
2359 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2360 struct rte_crypto_sym_xform *xform, void *sess)
2362 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2363 dpaa_sec_session *session = sess;
2367 PMD_INIT_FUNC_TRACE();
2369 if (unlikely(sess == NULL)) {
2370 DPAA_SEC_ERR("invalid session struct");
2373 memset(session, 0, sizeof(dpaa_sec_session));
2375 /* Default IV length = 0 */
2376 session->iv.length = 0;
2379 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2380 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2381 ret = dpaa_sec_cipher_init(dev, xform, session);
2383 /* Authentication Only */
2384 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2385 xform->next == NULL) {
2386 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2387 session->ctxt = DPAA_SEC_AUTH;
2388 ret = dpaa_sec_auth_init(dev, xform, session);
2390 /* Cipher then Authenticate */
2391 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2392 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2393 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2394 session->auth_cipher_text = 1;
2395 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2396 ret = dpaa_sec_auth_init(dev, xform, session);
2397 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2398 ret = dpaa_sec_cipher_init(dev, xform, session);
2400 ret = dpaa_sec_chain_init(dev, xform, session);
2402 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2405 /* Authenticate then Cipher */
2406 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2407 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2408 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2409 session->auth_cipher_text = 0;
2410 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2411 ret = dpaa_sec_cipher_init(dev, xform, session);
2412 else if (xform->next->cipher.algo
2413 == RTE_CRYPTO_CIPHER_NULL)
2414 ret = dpaa_sec_auth_init(dev, xform, session);
2416 ret = dpaa_sec_chain_init(dev, xform, session);
2418 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2422 /* AEAD operation for AES-GCM kind of Algorithms */
2423 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2424 xform->next == NULL) {
2425 ret = dpaa_sec_aead_init(dev, xform, session);
2428 DPAA_SEC_ERR("Invalid crypto type");
2432 DPAA_SEC_ERR("unable to init session");
2436 rte_spinlock_lock(&internals->lock);
2437 for (i = 0; i < MAX_DPAA_CORES; i++) {
2438 session->inq[i] = dpaa_sec_attach_rxq(internals);
2439 if (session->inq[i] == NULL) {
2440 DPAA_SEC_ERR("unable to attach sec queue");
2441 rte_spinlock_unlock(&internals->lock);
2446 rte_spinlock_unlock(&internals->lock);
2451 free_session_data(session);
2456 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2457 struct rte_crypto_sym_xform *xform,
2458 struct rte_cryptodev_sym_session *sess,
2459 struct rte_mempool *mempool)
2461 void *sess_private_data;
2464 PMD_INIT_FUNC_TRACE();
2466 if (rte_mempool_get(mempool, &sess_private_data)) {
2467 DPAA_SEC_ERR("Couldn't get object from session mempool");
2471 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2473 DPAA_SEC_ERR("failed to configure session parameters");
2475 /* Return session to mempool */
2476 rte_mempool_put(mempool, sess_private_data);
2480 set_sym_session_private_data(sess, dev->driver_id,
2488 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2490 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2491 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2494 for (i = 0; i < MAX_DPAA_CORES; i++) {
2496 dpaa_sec_detach_rxq(qi, s->inq[i]);
2500 free_session_data(s);
2501 rte_mempool_put(sess_mp, (void *)s);
2504 /** Clear the memory of session so it doesn't leave key material behind */
2506 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2507 struct rte_cryptodev_sym_session *sess)
2509 PMD_INIT_FUNC_TRACE();
2510 uint8_t index = dev->driver_id;
2511 void *sess_priv = get_sym_session_private_data(sess, index);
2512 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2515 free_session_memory(dev, s);
2516 set_sym_session_private_data(sess, index, NULL);
2520 #ifdef RTE_LIB_SECURITY
2522 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2523 struct rte_security_ipsec_xform *ipsec_xform,
2524 dpaa_sec_session *session)
2526 PMD_INIT_FUNC_TRACE();
2528 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2529 RTE_CACHE_LINE_SIZE);
2530 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2531 DPAA_SEC_ERR("No Memory for aead key");
2534 memcpy(session->aead_key.data, aead_xform->key.data,
2535 aead_xform->key.length);
2537 session->digest_length = aead_xform->digest_length;
2538 session->aead_key.length = aead_xform->key.length;
2540 switch (aead_xform->algo) {
2541 case RTE_CRYPTO_AEAD_AES_GCM:
2542 switch (session->digest_length) {
2544 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2547 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2550 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2553 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2554 session->digest_length);
2557 if (session->dir == DIR_ENC) {
2558 memcpy(session->encap_pdb.gcm.salt,
2559 (uint8_t *)&(ipsec_xform->salt), 4);
2561 memcpy(session->decap_pdb.gcm.salt,
2562 (uint8_t *)&(ipsec_xform->salt), 4);
2564 session->aead_key.algmode = OP_ALG_AAI_GCM;
2565 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2568 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard,"
			"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
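/*
 * Note on the auth mapping above: each OP_PCL_IPSEC_HMAC_* constant
 * encodes the RFC-mandated truncated ICV size (HMAC_SHA1_96 keeps 96 bits
 * per RFC 2404, HMAC_SHA2_256_128 keeps 128 bits per RFC 4868, and so
 * on). The lookaside protocol descriptor always emits that standard
 * truncation, which is why a SHA256-HMAC digest_length other than 16
 * bytes only draws a warning here: the requested non-standard length
 * cannot be honoured by the protocol offload.
 */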
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else {
		goto out;
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
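/*
 * Note on the anti-replay mapping above: the requested window is rounded
 * up to a power of two and then mapped onto the smallest CAAM anti-replay
 * sequence-number (ARS) window that covers it. For example,
 * replay_win_sz = 48 gives rte_align32pow2(48) = 64, which selects
 * PDBOPTS_ESP_ARS64; anything larger falls through to the 128-entry
 * window.
 */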
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
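/*
 * Illustrative sketch (not part of the driver): a PDCP data-plane SA
 * configuration as an application might fill it; all field values here
 * are hypothetical and the crypto_xform would normally be a cipher xform
 * chained to an auth xform:
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x1,
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffffff,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */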
static int
dpaa_sec_security_session_create(void *dev,
			struct rte_security_session_conf *conf,
			struct rte_security_session *sess,
			struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
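/*
 * Illustrative sketch (not part of the driver): applications reach this
 * callback through the rte_security API rather than calling it directly.
 * In releases of this era the flow looks roughly like the following;
 * mempool names are hypothetical and the exact signature varies across
 * DPDK versions:
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
 */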
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENOMEM;
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
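/*
 * Note on the callback above: the dequeued crypto op is handed straight
 * to the application as an event (ev->event_ptr), and the DQRR entry can
 * be consumed immediately because parallel scheduling keeps no ordering
 * state. The atomic variant below must instead keep the DQRR entry held
 * until the application releases it, which is why it returns
 * qman_cb_dqrr_defer and records the held entry per lcore.
 */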
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
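/*
 * Worked example for the index arithmetic above: DQRR entries live in a
 * 16-slot ring of 64-byte entries, so bits 6..9 of an entry's address
 * give its slot number, and ((uintptr_t)dqrr >> 6) & 15 recovers it with
 * no extra state. The slot is stored as index + 1 in ev->impl_opaque and
 * in the mbuf's seqn so that the value 0 can mean "no DQRR entry held".
 */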
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
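/*
 * Design note: RTE_SCHED_TYPE_ATOMIC maps to the QMan HOLDACTIVE feature,
 * where a held DQRR entry preserves per-flow ordering, while the default
 * parallel case sets AVOIDBLOCK so a busy portal does not stall the FQ.
 * AVOIDBLOCK is redundant alongside HOLDACTIVE, hence it is explicitly
 * cleared in the atomic case.
 */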
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
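/*
 * Design note: detaching reverses the attach sequence; the FQ must be
 * retired and taken out-of-service before qman_init_fq() can reprogram it
 * back to the poll-mode dqrr/ern callbacks used by the non-eventdev path.
 */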
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era", NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);