/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called four
	 * times to clear all the SG entries. dpaa_sec_alloc_ctx() is called
	 * for each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
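/*
 * Design note (added summary, derived from the code above): each queue
 * pair owns a context mempool, created in dpaa_sec_queue_pair_setup(),
 * and every context caches vtop_offset (virtual address minus IOVA) so
 * that addresses inside the context can be translated with a single
 * subtraction on the fast path.
 */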
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with the CAAM channel as destination, so that
 * all packets enqueued on this queue are dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
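/*
 * Summary (derived from the setup above): CONTEXTA of an rx FQ carries the
 * IOVA of the session's shared descriptor (hwdesc) and CONTEXTB carries the
 * FQID of the result queue, so CAAM knows both which descriptor to execute
 * and where to enqueue the processed frame.
 */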
/* Something is put into in_fq, and CAAM puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
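/*
 * Note (added summary): completions are staged in the per-lcore
 * DPAA_PER_LCORE_RTE_CRYPTO_OP array; once DPAA_SEC_BURST entries are held,
 * the callback returns qman_cb_dqrr_defer so the portal stops delivering
 * frames until the application drains the staged ops.
 */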
/* The CAAM result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		DPAA_SEC_ERR("unable to init caam source fq!");

	return ret;
}
static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (rta_inline_pdcp_query(authdata.algtype,
				  cipherdata.algtype,
				  ses->pdcp.sn_size,
				  ses->pdcp.hfn_ovd)) {
		cipherdata.key =
			(size_t)rte_dpaa_mem_vtop((void *)
					(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn, ses->pdcp.sn_size,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn, ses->pdcp.sn_size,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size, ses->pdcp.hfn,
						ses->pdcp.bearer, ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size, ses->pdcp.hfn,
						ses->pdcp.bearer, ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size, ses->pdcp.hfn,
						ses->pdcp.bearer, ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size, ses->pdcp.hfn,
						ses->pdcp.bearer, ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}

	return shared_desc_len;
}
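/*
 * Note (added summary): rta_inline_pdcp_query() above decides whether the
 * cipher key can stay immediate (RTA_DATA_IMM) in the shared descriptor for
 * this algorithm/SN-size/HFN-override combination; when it cannot, the key
 * is referenced by its IOVA instead (RTA_DATA_PTR).
 */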
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
#endif
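/*
 * Note (added summary): rta_inline_query() reports through bits in
 * sh_desc[2] whether each key fits inline in the shared descriptor: bit 0
 * covers the cipher key and bit 1 the auth key. A cleared bit means the key
 * is passed by IOVA reference (RTA_DATA_PTR) rather than as immediate data.
 */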
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in the fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}
	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
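/*
 * Note (added summary): the shared-descriptor length is recorded in the CDB
 * header and both header words are converted with rte_cpu_to_be_32(), since
 * the SEC block parses the CDB in big-endian byte order.
 */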
/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output,
		 * sg[1] is for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
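/*
 * Note (restating the contract above): a queue pair is lockless and must be
 * polled by a single thread only. The volatile dequeue command (VDQCR)
 * pulls at most num_rx_bufs frames, and the loop drains the portal until
 * the FQ leaves the VDQCR state.
 */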
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
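/*
 * Note (added summary, applies to both auth builders): for digest
 * verification (decode) the expected digest is copied into ctx->digest and
 * appended to the input SG list, so the SEC hardware compares the computed
 * ICV itself and reports a mismatch through the FD status.
 */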
/**
 * packet looks like:
 *           |<----data_len------->|
 * |ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	/* IV */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* data */
	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
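/*
 * Note (added summary of the pattern shared by all builders in this file):
 * each job is handed to QMan as a compound frame, with cf->sg[0] describing
 * the output buffer(s) and cf->sg[1] the input. The scatter-gather variants
 * mark those entries as extensions pointing into the per-context SG table
 * starting at cf->sg[2].
 */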
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	mbuf = sym->m_src;
	if (sym->m_dst)
		mbuf = sym->m_dst;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
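/*
 * Note (added, an inference rather than a statement from the original
 * sources): both proto builders clear the L4 packet-type bits on m_src,
 * presumably because the payload is transformed by the security protocol
 * and any previously parsed L4 classification no longer applies.
 */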
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
						op->sym->session,
						cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses)) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in the descriptor and it
			 * is overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
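/*
 * Usage sketch (illustrative only, not part of this driver): applications
 * reach these handlers through the generic cryptodev burst API, e.g.
 *
 *	struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *	uint16_t n;
 *
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, DPAA_SEC_BURST);
 *
 * where dev_id and qp_id identify this device and a queue pair configured
 * through dpaa_sec_queue_pair_setup().
 */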
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
			  __rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
		 dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
				CTX_POOL_NUM_BUFS,
				CTX_POOL_BUF_SIZE,
				CTX_POOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
			      dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_DEBUG("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
				     sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	free_session_data(s);
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			 struct rte_security_ipsec_xform *ipsec_xform,
			 dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
			       (uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
			       (uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
2553 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2554 struct rte_crypto_auth_xform *auth_xform,
2555 struct rte_security_ipsec_xform *ipsec_xform,
2556 dpaa_sec_session *session)
2559 session->cipher_key.data = rte_zmalloc(NULL,
2560 cipher_xform->key.length,
2561 RTE_CACHE_LINE_SIZE);
2562 if (session->cipher_key.data == NULL &&
2563 cipher_xform->key.length > 0) {
2564 DPAA_SEC_ERR("No Memory for cipher key");
2568 session->cipher_key.length = cipher_xform->key.length;
2569 memcpy(session->cipher_key.data, cipher_xform->key.data,
2570 cipher_xform->key.length);
2571 session->cipher_alg = cipher_xform->algo;
2573 session->cipher_key.data = NULL;
2574 session->cipher_key.length = 0;
2575 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2579 session->auth_key.data = rte_zmalloc(NULL,
2580 auth_xform->key.length,
2581 RTE_CACHE_LINE_SIZE);
2582 if (session->auth_key.data == NULL &&
2583 auth_xform->key.length > 0) {
2584 DPAA_SEC_ERR("No Memory for auth key");
2587 session->auth_key.length = auth_xform->key.length;
2588 memcpy(session->auth_key.data, auth_xform->key.data,
2589 auth_xform->key.length);
2590 session->auth_alg = auth_xform->algo;
2591 session->digest_length = auth_xform->digest_length;
2593 session->auth_key.data = NULL;
2594 session->auth_key.length = 0;
2595 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard, "
			"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			session->auth_alg);
		return -ENOTSUP;
	}
	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
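
/*
 * The OP_PCL_IPSEC_HMAC_* selections above hard-code the standard ESP
 * ICV truncations: 96 bits for HMAC-MD5/SHA1 (RFC 2403/2404) and
 * 128/192/256 bits for HMAC-SHA2-256/384/512 (RFC 4868). That is also
 * why the SHA-256 case warns when digest_length != 16: the lookaside
 * protocol descriptor always emits the RFC 4868 truncation regardless
 * of the requested length.
 */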
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
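
/*
 * A minimal usage sketch (illustrative only, not compiled here; field
 * values are examples, not taken from this driver) of a session_conf
 * that reaches the AES-GCM egress tunnel path above:
 *
 *	struct rte_crypto_sym_xform aead_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.key = { .data = key_buf, .length = 16 },
 *			.digest_length = 16,  selects OP_PCL_IPSEC_AES_GCM16
 *		},
 *	};
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.salt = 0xcafebabe,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead_xf,
 *	};
 */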
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;
	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}
	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
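
/*
 * Note on hfn_ovd_offset: it is copied from the cipher xform's IV
 * offset above because, when hfn_ovrd is enabled, the driver expects
 * the per-packet HFN override word at that offset inside the crypto
 * op; PDCP carries no IV of its own here, so the IV area doubles as
 * the carrier.
 */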
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENOMEM;
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
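
/*
 * Parallel mode can consume the DQRR entry straight away
 * (qman_cb_dqrr_consume): no ordering guarantee is given, so nothing
 * has to stay outstanding. Contrast with dpaa_sec_process_atomic_event()
 * below, which defers the entry until the application releases the op.
 */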
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
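
/*
 * The index math above follows the DQRR geometry: entries are 64 bytes
 * apart and the ring has 16 slots, so bits 6..9 of the entry address
 * give the slot number; e.g. an entry at ring offset 0x1c0 yields
 * ((0x1c0 >> 6) & 15) = 7. Recording the slot in the per-lcore held
 * mask and returning qman_cb_dqrr_defer keeps it outstanding until the
 * mbuf is released, which is what provides atomic scheduling.
 */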
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
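
/*
 * HOLDACTIVE keeps a FQ bound to the portal that is currently holding
 * one of its DQRR entries, which is what the atomic callback's
 * deferred-release scheme relies on; AVOIDBLOCK instead lets parallel
 * FQs migrate between portals. The two behaviours conflict, hence
 * AVOIDBLOCK is cleared in the atomic case. An illustrative call
 * (values are examples only):
 *
 *	struct rte_event ev = { .sched_type = RTE_SCHED_TYPE_ATOMIC };
 *	ret = dpaa_sec_eventq_attach(cryptodev, qp_id, ch_id, &ev);
 */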
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
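
/*
 * The rx FQs created in the loop above are anonymous
 * (QMAN_FQ_FLAG_DYNAMIC_FQID) and targeted at the CAAM DC portal; they
 * sit in internals->inq[] as a free pool and are only programmed with
 * a session's SEC descriptor when dpaa_sec_attach_rxq() hands one out
 * at session-setup time.
 */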
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
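
/*
 * RTE_PMD_REGISTER_CRYPTO_DRIVER assigns the value stored in
 * cryptodev_driver_id; that id is the index used by the sym-session
 * private-data accessors earlier in this file. The log type registered
 * last defaults to NOTICE and can typically be raised at runtime with
 * an EAL option along the lines of --log-level=pmd.crypto.dpaa:debug
 * (exact syntax varies across DPDK versions).
 */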