/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2021 NXP
 *
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

uint8_t dpaa_cryptodev_driver_id;
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called four
	 * times to clear all the SG entries. dpaa_sec_alloc_ctx() is called
	 * for each packet, and memset is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
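
/*
 * Illustrative sketch (not part of the upstream driver): the vtop_offset
 * cached above lets any address inside this ctx be converted to its IOVA
 * with plain arithmetic instead of another table lookup. A hypothetical
 * helper built on that invariant would look like:
 *
 *	static inline rte_iova_t
 *	ctx_to_iova(struct dpaa_sec_op_ctx *ctx, void *vaddr)
 *	{
 *		return (rte_iova_t)((size_t)vaddr - ctx->vtop_offset);
 *	}
 *
 * e.g. ctx_to_iova(ctx, &ctx->job.sg[0]) would yield the bus address of
 * the SG table that the SEC hardware consumes.
 */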

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

/* frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
			rta_inline_pdcp_sdap_query(authdata.algtype,
						   cipherdata.algtype,
						   ses->pdcp.sn_size,
						   ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					  cipherdata.algtype,
					  ses->pdcp.sn_size,
					  ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata);
	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
						     1, swap, &authdata);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata);
		}
	}

	return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
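	/* Explanatory note (based on how the query result is used below):
	 * rta_inline_query() checks whether both keys still fit in the
	 * shared descriptor. Bit 0 of cdb->sh_desc[2] is set when the
	 * cipher key may stay immediate (RTA_DATA_IMM) and bit 1 when the
	 * auth key may; a cleared bit means that key must be referenced by
	 * pointer (RTA_DATA_PTR) instead.
	 */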
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
						cdb->sh_desc,
						true, swap, SHR_NEVER,
						&alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of up to four buffers we ask for the exact number of
	 * buffers. Otherwise we do not set the QM_VDQCR_EXACT flag; not
	 * setting it can return up to two more buffers than requested, so
	 * we ask for two fewer in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
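
/*
 * Usage sketch (illustrative, not part of the driver): since a queue pair
 * is lockless, an application must dedicate each qp to a single lcore and
 * pair its bursts on that same qp, e.g. (dev_id, qp_id and burst are
 * hypothetical application variables):
 *
 *	struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *						    ops, burst);
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    ops, burst);
 */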
733 static inline struct dpaa_sec_job *
734 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
736 struct rte_crypto_sym_op *sym = op->sym;
737 struct rte_mbuf *mbuf = sym->m_src;
738 struct dpaa_sec_job *cf;
739 struct dpaa_sec_op_ctx *ctx;
740 struct qm_sg_entry *sg, *out_sg, *in_sg;
741 phys_addr_t start_addr;
742 uint8_t *old_digest, extra_segs;
743 int data_len, data_offset;
745 data_len = sym->auth.data.length;
746 data_offset = sym->auth.data.offset;
748 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
749 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
750 if ((data_len & 7) || (data_offset & 7)) {
751 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
755 data_len = data_len >> 3;
756 data_offset = data_offset >> 3;
764 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
765 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
769 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
775 old_digest = ctx->digest;
779 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
780 out_sg->length = ses->digest_length;
781 cpu_to_hw_sg(out_sg);
785 /* need to extend the input to a compound frame */
786 in_sg->extension = 1;
788 in_sg->length = data_len;
789 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
794 if (ses->iv.length) {
797 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
800 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
801 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
803 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
804 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
807 sg->length = ses->iv.length;
809 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
810 in_sg->length += sg->length;
815 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
816 sg->offset = data_offset;
818 if (data_len <= (mbuf->data_len - data_offset)) {
819 sg->length = data_len;
821 sg->length = mbuf->data_len - data_offset;
823 /* remaining i/p segs */
824 while ((data_len = data_len - sg->length) &&
825 (mbuf = mbuf->next)) {
828 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
829 if (data_len > mbuf->data_len)
830 sg->length = mbuf->data_len;
832 sg->length = data_len;
836 if (is_decode(ses)) {
837 /* Digest verification case */
840 rte_memcpy(old_digest, sym->auth.digest.data,
842 start_addr = rte_dpaa_mem_vtop(old_digest);
843 qm_sg_entry_set64(sg, start_addr);
844 sg->length = ses->digest_length;
845 in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 * The packet looks like:
 *	|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 */
862 static inline struct dpaa_sec_job *
863 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
865 struct rte_crypto_sym_op *sym = op->sym;
866 struct rte_mbuf *mbuf = sym->m_src;
867 struct dpaa_sec_job *cf;
868 struct dpaa_sec_op_ctx *ctx;
869 struct qm_sg_entry *sg, *in_sg;
870 rte_iova_t start_addr;
872 int data_len, data_offset;
874 data_len = sym->auth.data.length;
875 data_offset = sym->auth.data.offset;
877 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
878 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
879 if ((data_len & 7) || (data_offset & 7)) {
880 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
884 data_len = data_len >> 3;
885 data_offset = data_offset >> 3;
888 ctx = dpaa_sec_alloc_ctx(ses, 4);
894 old_digest = ctx->digest;
896 start_addr = rte_pktmbuf_iova(mbuf);
899 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
900 sg->length = ses->digest_length;
905 /* need to extend the input to a compound frame */
906 in_sg->extension = 1;
908 in_sg->length = data_len;
909 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
912 if (ses->iv.length) {
915 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
918 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
919 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
921 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
922 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
925 sg->length = ses->iv.length;
927 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
928 in_sg->length += sg->length;
933 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
934 sg->offset = data_offset;
935 sg->length = data_len;
937 if (is_decode(ses)) {
938 /* Digest verification case */
940 /* hash result or digest, save digest first */
941 rte_memcpy(old_digest, sym->auth.digest.data,
943 /* let's check digest by hw */
944 start_addr = rte_dpaa_mem_vtop(old_digest);
946 qm_sg_entry_set64(sg, start_addr);
947 sg->length = ses->digest_length;
948 in_sg->length += ses->digest_length;
957 static inline struct dpaa_sec_job *
958 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
960 struct rte_crypto_sym_op *sym = op->sym;
961 struct dpaa_sec_job *cf;
962 struct dpaa_sec_op_ctx *ctx;
963 struct qm_sg_entry *sg, *out_sg, *in_sg;
964 struct rte_mbuf *mbuf;
966 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
968 int data_len, data_offset;
970 data_len = sym->cipher.data.length;
971 data_offset = sym->cipher.data.offset;
973 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
974 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
975 if ((data_len & 7) || (data_offset & 7)) {
976 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
980 data_len = data_len >> 3;
981 data_offset = data_offset >> 3;
986 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
989 req_segs = mbuf->nb_segs * 2 + 3;
991 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
992 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
997 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1005 out_sg = &cf->sg[0];
1006 out_sg->extension = 1;
1007 out_sg->length = data_len;
1008 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1009 cpu_to_hw_sg(out_sg);
1013 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1014 sg->length = mbuf->data_len - data_offset;
1015 sg->offset = data_offset;
1017 /* Successive segs */
1022 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1023 sg->length = mbuf->data_len;
1032 in_sg->extension = 1;
1034 in_sg->length = data_len + ses->iv.length;
1037 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1038 cpu_to_hw_sg(in_sg);
1041 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1042 sg->length = ses->iv.length;
1047 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1048 sg->length = mbuf->data_len - data_offset;
1049 sg->offset = data_offset;
1051 /* Successive segs */
1056 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1057 sg->length = mbuf->data_len;
1066 static inline struct dpaa_sec_job *
1067 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1069 struct rte_crypto_sym_op *sym = op->sym;
1070 struct dpaa_sec_job *cf;
1071 struct dpaa_sec_op_ctx *ctx;
1072 struct qm_sg_entry *sg;
1073 rte_iova_t src_start_addr, dst_start_addr;
1074 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1076 int data_len, data_offset;
1078 data_len = sym->cipher.data.length;
1079 data_offset = sym->cipher.data.offset;
1081 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1082 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1083 if ((data_len & 7) || (data_offset & 7)) {
1084 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1088 data_len = data_len >> 3;
1089 data_offset = data_offset >> 3;
1092 ctx = dpaa_sec_alloc_ctx(ses, 4);
1099 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1102 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1104 dst_start_addr = src_start_addr;
1108 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1109 sg->length = data_len + ses->iv.length;
1115 /* need to extend the input to a compound frame */
1118 sg->length = data_len + ses->iv.length;
1119 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1123 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1124 sg->length = ses->iv.length;
1128 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1129 sg->length = data_len;
1136 static inline struct dpaa_sec_job *
1137 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1139 struct rte_crypto_sym_op *sym = op->sym;
1140 struct dpaa_sec_job *cf;
1141 struct dpaa_sec_op_ctx *ctx;
1142 struct qm_sg_entry *sg, *out_sg, *in_sg;
1143 struct rte_mbuf *mbuf;
1145 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1150 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1153 req_segs = mbuf->nb_segs * 2 + 4;
1156 if (ses->auth_only_len)
1159 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1160 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1165 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1172 rte_prefetch0(cf->sg);
1175 out_sg = &cf->sg[0];
1176 out_sg->extension = 1;
1178 out_sg->length = sym->aead.data.length + ses->digest_length;
1180 out_sg->length = sym->aead.data.length;
1182 /* output sg entries */
1184 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1185 cpu_to_hw_sg(out_sg);
1188 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1189 sg->length = mbuf->data_len - sym->aead.data.offset;
1190 sg->offset = sym->aead.data.offset;
1192 /* Successive segs */
1197 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1198 sg->length = mbuf->data_len;
1201 sg->length -= ses->digest_length;
1203 if (is_encode(ses)) {
1205 /* set auth output */
1207 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1208 sg->length = ses->digest_length;
1216 in_sg->extension = 1;
1219 in_sg->length = ses->iv.length + sym->aead.data.length
1220 + ses->auth_only_len;
1222 in_sg->length = ses->iv.length + sym->aead.data.length
1223 + ses->auth_only_len + ses->digest_length;
1225 /* input sg entries */
1227 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1228 cpu_to_hw_sg(in_sg);
1231 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1232 sg->length = ses->iv.length;
1235 /* 2nd seg auth only */
1236 if (ses->auth_only_len) {
1238 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1239 sg->length = ses->auth_only_len;
1245 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1246 sg->length = mbuf->data_len - sym->aead.data.offset;
1247 sg->offset = sym->aead.data.offset;
1249 /* Successive segs */
1254 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1255 sg->length = mbuf->data_len;
1259 if (is_decode(ses)) {
1262 memcpy(ctx->digest, sym->aead.digest.data,
1263 ses->digest_length);
1264 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1265 sg->length = ses->digest_length;
1273 static inline struct dpaa_sec_job *
1274 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1276 struct rte_crypto_sym_op *sym = op->sym;
1277 struct dpaa_sec_job *cf;
1278 struct dpaa_sec_op_ctx *ctx;
1279 struct qm_sg_entry *sg;
1280 uint32_t length = 0;
1281 rte_iova_t src_start_addr, dst_start_addr;
1282 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1285 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1288 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1290 dst_start_addr = src_start_addr;
1292 ctx = dpaa_sec_alloc_ctx(ses, 7);
1300 rte_prefetch0(cf->sg);
1302 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1303 if (is_encode(ses)) {
1304 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1305 sg->length = ses->iv.length;
1306 length += sg->length;
1310 if (ses->auth_only_len) {
1311 qm_sg_entry_set64(sg,
1312 rte_dpaa_mem_vtop(sym->aead.aad.data));
1313 sg->length = ses->auth_only_len;
1314 length += sg->length;
1318 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1319 sg->length = sym->aead.data.length;
1320 length += sg->length;
1324 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1325 sg->length = ses->iv.length;
1326 length += sg->length;
1330 if (ses->auth_only_len) {
1331 qm_sg_entry_set64(sg,
1332 rte_dpaa_mem_vtop(sym->aead.aad.data));
1333 sg->length = ses->auth_only_len;
1334 length += sg->length;
1338 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1339 sg->length = sym->aead.data.length;
1340 length += sg->length;
1343 memcpy(ctx->digest, sym->aead.digest.data,
1344 ses->digest_length);
1347 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1348 sg->length = ses->digest_length;
1349 length += sg->length;
1353 /* input compound frame */
1354 cf->sg[1].length = length;
1355 cf->sg[1].extension = 1;
1356 cf->sg[1].final = 1;
1357 cpu_to_hw_sg(&cf->sg[1]);
1361 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1362 qm_sg_entry_set64(sg,
1363 dst_start_addr + sym->aead.data.offset);
1364 sg->length = sym->aead.data.length;
1365 length = sg->length;
1366 if (is_encode(ses)) {
1368 /* set auth output */
1370 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1371 sg->length = ses->digest_length;
1372 length += sg->length;
1377 /* output compound frame */
1378 cf->sg[0].length = length;
1379 cf->sg[0].extension = 1;
1380 cpu_to_hw_sg(&cf->sg[0]);
1385 static inline struct dpaa_sec_job *
1386 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1388 struct rte_crypto_sym_op *sym = op->sym;
1389 struct dpaa_sec_job *cf;
1390 struct dpaa_sec_op_ctx *ctx;
1391 struct qm_sg_entry *sg, *out_sg, *in_sg;
1392 struct rte_mbuf *mbuf;
1394 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1399 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1402 req_segs = mbuf->nb_segs * 2 + 4;
1405 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1406 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1411 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1418 rte_prefetch0(cf->sg);
1421 out_sg = &cf->sg[0];
1422 out_sg->extension = 1;
1424 out_sg->length = sym->auth.data.length + ses->digest_length;
1426 out_sg->length = sym->auth.data.length;
1428 /* output sg entries */
1430 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1431 cpu_to_hw_sg(out_sg);
1434 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1435 sg->length = mbuf->data_len - sym->auth.data.offset;
1436 sg->offset = sym->auth.data.offset;
1438 /* Successive segs */
1443 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1444 sg->length = mbuf->data_len;
1447 sg->length -= ses->digest_length;
1449 if (is_encode(ses)) {
1451 /* set auth output */
1453 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1454 sg->length = ses->digest_length;
1462 in_sg->extension = 1;
1465 in_sg->length = ses->iv.length + sym->auth.data.length;
1467 in_sg->length = ses->iv.length + sym->auth.data.length
1468 + ses->digest_length;
1470 /* input sg entries */
1472 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1473 cpu_to_hw_sg(in_sg);
1476 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1477 sg->length = ses->iv.length;
1482 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1483 sg->length = mbuf->data_len - sym->auth.data.offset;
1484 sg->offset = sym->auth.data.offset;
1486 /* Successive segs */
1491 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1492 sg->length = mbuf->data_len;
1496 sg->length -= ses->digest_length;
1497 if (is_decode(ses)) {
1500 memcpy(ctx->digest, sym->auth.digest.data,
1501 ses->digest_length);
1502 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1503 sg->length = ses->digest_length;
1511 static inline struct dpaa_sec_job *
1512 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1514 struct rte_crypto_sym_op *sym = op->sym;
1515 struct dpaa_sec_job *cf;
1516 struct dpaa_sec_op_ctx *ctx;
1517 struct qm_sg_entry *sg;
1518 rte_iova_t src_start_addr, dst_start_addr;
1519 uint32_t length = 0;
1520 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1523 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1525 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1527 dst_start_addr = src_start_addr;
1529 ctx = dpaa_sec_alloc_ctx(ses, 7);
1537 rte_prefetch0(cf->sg);
1539 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1540 if (is_encode(ses)) {
1541 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1542 sg->length = ses->iv.length;
1543 length += sg->length;
1547 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1548 sg->length = sym->auth.data.length;
1549 length += sg->length;
1553 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1554 sg->length = ses->iv.length;
1555 length += sg->length;
1560 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1561 sg->length = sym->auth.data.length;
1562 length += sg->length;
1565 memcpy(ctx->digest, sym->auth.digest.data,
1566 ses->digest_length);
1569 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1570 sg->length = ses->digest_length;
1571 length += sg->length;
1575 /* input compound frame */
1576 cf->sg[1].length = length;
1577 cf->sg[1].extension = 1;
1578 cf->sg[1].final = 1;
1579 cpu_to_hw_sg(&cf->sg[1]);
1583 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1584 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1585 sg->length = sym->cipher.data.length;
1586 length = sg->length;
1587 if (is_encode(ses)) {
1589 /* set auth output */
1591 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1592 sg->length = ses->digest_length;
1593 length += sg->length;
1598 /* output compound frame */
1599 cf->sg[0].length = length;
1600 cf->sg[0].extension = 1;
1601 cpu_to_hw_sg(&cf->sg[0]);
1606 #ifdef RTE_LIB_SECURITY
1607 static inline struct dpaa_sec_job *
1608 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1610 struct rte_crypto_sym_op *sym = op->sym;
1611 struct dpaa_sec_job *cf;
1612 struct dpaa_sec_op_ctx *ctx;
1613 struct qm_sg_entry *sg;
1614 phys_addr_t src_start_addr, dst_start_addr;
1616 ctx = dpaa_sec_alloc_ctx(ses, 2);
1622 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1625 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1627 dst_start_addr = src_start_addr;
1631 qm_sg_entry_set64(sg, src_start_addr);
1632 sg->length = sym->m_src->pkt_len;
1636 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1639 qm_sg_entry_set64(sg, dst_start_addr);
1640 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1646 static inline struct dpaa_sec_job *
1647 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1649 struct rte_crypto_sym_op *sym = op->sym;
1650 struct dpaa_sec_job *cf;
1651 struct dpaa_sec_op_ctx *ctx;
1652 struct qm_sg_entry *sg, *out_sg, *in_sg;
1653 struct rte_mbuf *mbuf;
1655 uint32_t in_len = 0, out_len = 0;
1662 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1663 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1664 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1669 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1675 out_sg = &cf->sg[0];
1676 out_sg->extension = 1;
1677 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1681 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1684 /* Successive segs */
1685 while (mbuf->next) {
1686 sg->length = mbuf->data_len;
1687 out_len += sg->length;
1691 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1694 sg->length = mbuf->buf_len - mbuf->data_off;
1695 out_len += sg->length;
1699 out_sg->length = out_len;
1700 cpu_to_hw_sg(out_sg);
1705 in_sg->extension = 1;
1707 in_len = mbuf->data_len;
1710 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1713 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1714 sg->length = mbuf->data_len;
1717 /* Successive segs */
1722 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1723 sg->length = mbuf->data_len;
1725 in_len += sg->length;
1731 in_sg->length = in_len;
1732 cpu_to_hw_sg(in_sg);
1734 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	return cf;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
							~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
						op->sym->session,
						dpaa_cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
						op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses)) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}
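			/* Explanatory note (inferred from the packing above):
			 * bit 31 of fd->cmd marks the override word as valid,
			 * bits 16-30 carry auth_tail_len and bits 0-15 carry
			 * auth_hdr_len, which SEC then uses in place of the
			 * zero auth-only length compiled into the descriptor.
			 */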
#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per-packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
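
/*
 * Explanatory note (not from the original source): applications reach the
 * callback above through rte_cryptodev_sym_get_private_session_size() and
 * use the returned value, i.e. sizeof(dpaa_sec_session) for this PMD, to
 * size the per-session private-data area of their session mempool.
 */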

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.length = xform->auth.key.length;
	if (xform->auth.key.length) {
		session->auth_key.data =
			rte_zmalloc(NULL, xform->auth.key.length,
				    RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, xform->auth.key.data,
		       xform->auth.key.length);
	}
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_DEBUG("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
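
/*
 * Explanatory note (not from the original source): each session owns one
 * Rx FQ per lcore (sess->inq[]). Attaching points that FQ's context at the
 * session's CDB and its destination at this qp's outq, which is why a
 * session may only be used, per core, on the qp it was first attached to.
 */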

static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}

static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	free_session_data(s);
	rte_mempool_put(sess_mp, (void *)s);
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
2599 #ifdef RTE_LIB_SECURITY
2601 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2602 struct rte_security_ipsec_xform *ipsec_xform,
2603 dpaa_sec_session *session)
2605 PMD_INIT_FUNC_TRACE();
2607 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2608 RTE_CACHE_LINE_SIZE);
2609 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2610 DPAA_SEC_ERR("No memory for AEAD key");
2611 return -ENOMEM;
2612 }
2613 memcpy(session->aead_key.data, aead_xform->key.data,
2614 aead_xform->key.length);
2616 session->digest_length = aead_xform->digest_length;
2617 session->aead_key.length = aead_xform->key.length;
2619 switch (aead_xform->algo) {
2620 case RTE_CRYPTO_AEAD_AES_GCM:
2621 switch (session->digest_length) {
2622 case 8:
2623 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2624 break;
2625 case 12:
2626 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2627 break;
2628 case 16:
2629 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2630 break;
2631 default:
2632 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2633 session->digest_length);
2634 return -EINVAL;
2635 }
2636 if (session->dir == DIR_ENC) {
2637 memcpy(session->encap_pdb.gcm.salt,
2638 (uint8_t *)&(ipsec_xform->salt), 4);
2639 } else {
2640 memcpy(session->decap_pdb.gcm.salt,
2641 (uint8_t *)&(ipsec_xform->salt), 4);
2642 }
2643 session->aead_key.algmode = OP_ALG_AAI_GCM;
2644 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2645 break;
2646 default:
2647 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2648 aead_xform->algo);
2649 return -ENOTSUP;
2650 }
2651 return 0;
2652 }
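/* Lookaside-IPsec session setup for independent cipher and auth transforms
 * (the non-AEAD case). Either transform may be absent or NULL-algorithm;
 * the keys are copied and mapped to CAAM IPsec algorithm codes below.
 */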
2655 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2656 struct rte_crypto_auth_xform *auth_xform,
2657 struct rte_security_ipsec_xform *ipsec_xform,
2658 dpaa_sec_session *session)
2660 if (cipher_xform) {
2661 session->cipher_key.data = rte_zmalloc(NULL,
2662 cipher_xform->key.length,
2663 RTE_CACHE_LINE_SIZE);
2664 if (session->cipher_key.data == NULL &&
2665 cipher_xform->key.length > 0) {
2666 DPAA_SEC_ERR("No memory for cipher key");
2667 return -ENOMEM;
2668 }
2670 session->cipher_key.length = cipher_xform->key.length;
2671 memcpy(session->cipher_key.data, cipher_xform->key.data,
2672 cipher_xform->key.length);
2673 session->cipher_alg = cipher_xform->algo;
2674 } else {
2675 session->cipher_key.data = NULL;
2676 session->cipher_key.length = 0;
2677 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2678 }
2680 if (auth_xform) {
2681 session->auth_key.data = rte_zmalloc(NULL,
2682 auth_xform->key.length,
2683 RTE_CACHE_LINE_SIZE);
2684 if (session->auth_key.data == NULL &&
2685 auth_xform->key.length > 0) {
2686 DPAA_SEC_ERR("No memory for auth key");
2687 return -ENOMEM;
2688 }
2689 session->auth_key.length = auth_xform->key.length;
2690 memcpy(session->auth_key.data, auth_xform->key.data,
2691 auth_xform->key.length);
2692 session->auth_alg = auth_xform->algo;
2693 session->digest_length = auth_xform->digest_length;
2694 } else {
2695 session->auth_key.data = NULL;
2696 session->auth_key.length = 0;
2697 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2698 }
2700 switch (session->auth_alg) {
2701 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2702 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2703 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2704 break;
2705 case RTE_CRYPTO_AUTH_MD5_HMAC:
2706 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2707 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2708 break;
2709 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2710 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2711 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2712 if (session->digest_length != 16)
2713 DPAA_SEC_WARN(
2714 "Using sha256-hmac with a truncated digest is non-standard;"
2715 " it will not work with lookaside protocol offload");
2716 break;
2717 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2718 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2719 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2720 break;
2721 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2722 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2723 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2724 break;
2725 case RTE_CRYPTO_AUTH_AES_CMAC:
2726 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2727 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2728 break;
2729 case RTE_CRYPTO_AUTH_NULL:
2730 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2731 break;
2732 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2733 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2734 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2735 break;
2736 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2737 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2738 case RTE_CRYPTO_AUTH_SHA1:
2739 case RTE_CRYPTO_AUTH_SHA256:
2740 case RTE_CRYPTO_AUTH_SHA512:
2741 case RTE_CRYPTO_AUTH_SHA224:
2742 case RTE_CRYPTO_AUTH_SHA384:
2743 case RTE_CRYPTO_AUTH_MD5:
2744 case RTE_CRYPTO_AUTH_AES_GMAC:
2745 case RTE_CRYPTO_AUTH_KASUMI_F9:
2746 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2747 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2748 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2749 session->auth_alg);
2750 return -ENOTSUP;
2751 default:
2752 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2753 session->auth_alg);
2754 return -ENOTSUP;
2755 }
2757 switch (session->cipher_alg) {
2758 case RTE_CRYPTO_CIPHER_AES_CBC:
2759 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2760 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2761 break;
2762 case RTE_CRYPTO_CIPHER_DES_CBC:
2763 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2764 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2765 break;
2766 case RTE_CRYPTO_CIPHER_3DES_CBC:
2767 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2768 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2769 break;
2770 case RTE_CRYPTO_CIPHER_AES_CTR:
2771 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2772 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2773 if (session->dir == DIR_ENC) {
2774 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2775 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2776 } else {
2777 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2778 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2779 }
2780 break;
2781 case RTE_CRYPTO_CIPHER_NULL:
2782 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2783 break;
2784 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2785 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2786 case RTE_CRYPTO_CIPHER_3DES_ECB:
2787 case RTE_CRYPTO_CIPHER_AES_ECB:
2788 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2789 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2790 session->cipher_alg);
2791 return -ENOTSUP;
2792 default:
2793 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2794 session->cipher_alg);
2795 return -ENOTSUP;
2796 }
2798 return 0;
2799 }
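/* Build an IPsec security session. A rough application-side sketch of the
 * configuration this consumes (illustrative only, fields abridged):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = ipsec_xform,
 *		.crypto_xform = &cipher_xform,
 *	};
 *
 * For egress tunnel SAs the outer IPv4/IPv6 header is prebuilt here and
 * inlined into the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL).
 */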
2802 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2803 struct rte_security_session_conf *conf,
2804 void *sess)
2805 {
2806 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2807 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2808 struct rte_crypto_auth_xform *auth_xform = NULL;
2809 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2810 struct rte_crypto_aead_xform *aead_xform = NULL;
2811 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2812 uint32_t i;
2813 int ret;
2815 PMD_INIT_FUNC_TRACE();
2817 memset(session, 0, sizeof(dpaa_sec_session));
2818 session->proto_alg = conf->protocol;
2819 session->ctxt = DPAA_SEC_IPSEC;
2821 if (ipsec_xform->life.bytes_hard_limit != 0 ||
2822 ipsec_xform->life.bytes_soft_limit != 0 ||
2823 ipsec_xform->life.packets_hard_limit != 0 ||
2824 ipsec_xform->life.packets_soft_limit != 0)
2825 return -ENOTSUP;
2827 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2828 session->dir = DIR_ENC;
2829 else
2830 session->dir = DIR_DEC;
2832 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2833 cipher_xform = &conf->crypto_xform->cipher;
2834 if (conf->crypto_xform->next)
2835 auth_xform = &conf->crypto_xform->next->auth;
2836 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2837 ipsec_xform, session);
2838 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2839 auth_xform = &conf->crypto_xform->auth;
2840 if (conf->crypto_xform->next)
2841 cipher_xform = &conf->crypto_xform->next->cipher;
2842 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2843 ipsec_xform, session);
2844 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2845 aead_xform = &conf->crypto_xform->aead;
2846 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2847 ipsec_xform, session);
2848 } else {
2849 DPAA_SEC_ERR("XFORM not specified");
2850 ret = -EINVAL;
2851 goto out;
2852 }
2853 if (ret) {
2854 DPAA_SEC_ERR("Failed to process xform");
2855 goto out;
2856 }
2858 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2859 if (ipsec_xform->tunnel.type ==
2860 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2861 session->ip4_hdr.ip_v = IPVERSION;
2862 session->ip4_hdr.ip_hl = 5;
2863 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2864 sizeof(session->ip4_hdr));
2865 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2866 session->ip4_hdr.ip_id = 0;
2867 session->ip4_hdr.ip_off = 0;
2868 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2869 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2870 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2871 IPPROTO_ESP : IPPROTO_AH;
2872 session->ip4_hdr.ip_sum = 0;
2873 session->ip4_hdr.ip_src =
2874 ipsec_xform->tunnel.ipv4.src_ip;
2875 session->ip4_hdr.ip_dst =
2876 ipsec_xform->tunnel.ipv4.dst_ip;
2877 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2878 (void *)&session->ip4_hdr,
2879 sizeof(struct ip));
2880 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2881 } else if (ipsec_xform->tunnel.type ==
2882 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2883 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2884 DPAA_IPv6_DEFAULT_VTC_FLOW |
2885 ((ipsec_xform->tunnel.ipv6.dscp <<
2886 RTE_IPV6_HDR_TC_SHIFT) &
2887 RTE_IPV6_HDR_TC_MASK) |
2888 ((ipsec_xform->tunnel.ipv6.flabel <<
2889 RTE_IPV6_HDR_FL_SHIFT) &
2890 RTE_IPV6_HDR_FL_MASK));
2891 /* Payload length will be updated by HW */
2892 session->ip6_hdr.payload_len = 0;
2893 session->ip6_hdr.hop_limits =
2894 ipsec_xform->tunnel.ipv6.hlimit;
2895 session->ip6_hdr.proto = (ipsec_xform->proto ==
2896 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2897 IPPROTO_ESP : IPPROTO_AH;
2898 memcpy(&session->ip6_hdr.src_addr,
2899 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2900 memcpy(&session->ip6_hdr.dst_addr,
2901 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2902 session->encap_pdb.ip_hdr_len =
2903 sizeof(struct rte_ipv6_hdr);
2904 }
2906 session->encap_pdb.options =
2907 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2908 PDBOPTS_ESP_OIHI_PDB_INL |
2909 PDBOPTS_ESP_IVSRC |
2910 PDBHMO_ESP_SNR;
2911 if (ipsec_xform->options.dec_ttl)
2912 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
2913 if (ipsec_xform->options.esn)
2914 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2915 session->encap_pdb.spi = ipsec_xform->spi;
2917 } else if (ipsec_xform->direction ==
2918 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2919 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2920 session->decap_pdb.options = sizeof(struct ip) << 16;
2921 else
2922 session->decap_pdb.options =
2923 sizeof(struct rte_ipv6_hdr) << 16;
2924 if (ipsec_xform->options.esn)
2925 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2926 if (ipsec_xform->replay_win_sz) {
2927 uint32_t win_sz;
2928 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2930 switch (win_sz) {
2931 case 1:
2932 case 2:
2933 case 4:
2934 case 8:
2935 case 16:
2936 case 32:
2937 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2938 break;
2939 case 64:
2940 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2941 break;
2942 default:
2943 session->decap_pdb.options |=
2944 PDBOPTS_ESP_ARS128;
2945 }
2946 }
2947 }
2949 rte_spinlock_lock(&internals->lock);
2950 for (i = 0; i < MAX_DPAA_CORES; i++) {
2951 session->inq[i] = dpaa_sec_attach_rxq(internals);
2952 if (session->inq[i] == NULL) {
2953 DPAA_SEC_ERR("unable to attach sec queue");
2954 rte_spinlock_unlock(&internals->lock);
2955 goto out;
2956 }
2957 }
2958 rte_spinlock_unlock(&internals->lock);
2959 return 0;
2961 out:
2962 free_session_data(session);
2963 return -1;
2964 }
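/* Build a PDCP security session (control or user plane). Ciphering is
 * SNOW3G-UEA2, ZUC-EEA3, AES-CTR or NULL; integrity, when requested, is
 * SNOW3G-UIA2, ZUC-EIA3, AES-CMAC or NULL. Bearer, direction, SN size and
 * HFN parameters are taken from the rte_security_pdcp_xform.
 */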
2967 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2968 struct rte_security_session_conf *conf,
2971 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2972 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2973 struct rte_crypto_auth_xform *auth_xform = NULL;
2974 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2975 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2976 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2977 uint32_t i;
2980 PMD_INIT_FUNC_TRACE();
2982 memset(session, 0, sizeof(dpaa_sec_session));
2984 /* find xform types */
2985 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2986 cipher_xform = &xform->cipher;
2987 if (xform->next != NULL &&
2988 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
2989 auth_xform = &xform->next->auth;
2990 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2991 auth_xform = &xform->auth;
2992 if (xform->next != NULL &&
2993 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
2994 cipher_xform = &xform->next->cipher;
2995 } else {
2996 DPAA_SEC_ERR("Invalid crypto type");
2997 return -EINVAL;
2998 }
3000 session->proto_alg = conf->protocol;
3001 session->ctxt = DPAA_SEC_PDCP;
3003 if (cipher_xform) {
3004 switch (cipher_xform->algo) {
3005 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3006 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3007 break;
3008 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3009 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3010 break;
3011 case RTE_CRYPTO_CIPHER_AES_CTR:
3012 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3013 break;
3014 case RTE_CRYPTO_CIPHER_NULL:
3015 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3016 break;
3017 default:
3018 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3019 session->cipher_alg);
3020 return -EINVAL;
3021 }
3023 session->cipher_key.data = rte_zmalloc(NULL,
3024 cipher_xform->key.length,
3025 RTE_CACHE_LINE_SIZE);
3026 if (session->cipher_key.data == NULL &&
3027 cipher_xform->key.length > 0) {
3028 DPAA_SEC_ERR("No memory for cipher key");
3029 return -ENOMEM;
3030 }
3031 session->cipher_key.length = cipher_xform->key.length;
3032 memcpy(session->cipher_key.data, cipher_xform->key.data,
3033 cipher_xform->key.length);
3034 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3035 DIR_ENC : DIR_DEC;
3036 session->cipher_alg = cipher_xform->algo;
3037 } else {
3038 session->cipher_key.data = NULL;
3039 session->cipher_key.length = 0;
3040 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3041 session->dir = DIR_ENC;
3042 }
3044 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3045 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3046 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3048 "PDCP Seq Num size should be 5/12 bits for cmode");
3055 switch (auth_xform->algo) {
3056 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3057 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3058 break;
3059 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3060 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3061 break;
3062 case RTE_CRYPTO_AUTH_AES_CMAC:
3063 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3064 break;
3065 case RTE_CRYPTO_AUTH_NULL:
3066 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3067 break;
3068 default:
3069 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3070 session->auth_alg);
3071 rte_free(session->cipher_key.data);
3072 return -EINVAL;
3073 }
3074 session->auth_key.data = rte_zmalloc(NULL,
3075 auth_xform->key.length,
3076 RTE_CACHE_LINE_SIZE);
3077 if (!session->auth_key.data &&
3078 auth_xform->key.length > 0) {
3079 DPAA_SEC_ERR("No memory for auth key");
3080 rte_free(session->cipher_key.data);
3081 return -ENOMEM;
3082 }
3083 session->auth_key.length = auth_xform->key.length;
3084 memcpy(session->auth_key.data, auth_xform->key.data,
3085 auth_xform->key.length);
3086 session->auth_alg = auth_xform->algo;
3087 } else {
3088 session->auth_key.data = NULL;
3089 session->auth_key.length = 0;
3090 session->auth_alg = 0;
3091 }
3092 session->pdcp.domain = pdcp_xform->domain;
3093 session->pdcp.bearer = pdcp_xform->bearer;
3094 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3095 session->pdcp.sn_size = pdcp_xform->sn_size;
3096 session->pdcp.hfn = pdcp_xform->hfn;
3097 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3098 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3099 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3100 if (cipher_xform)
3101 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3103 rte_spinlock_lock(&dev_priv->lock);
3104 for (i = 0; i < MAX_DPAA_CORES; i++) {
3105 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3106 if (session->inq[i] == NULL) {
3107 DPAA_SEC_ERR("unable to attach sec queue");
3108 rte_spinlock_unlock(&dev_priv->lock);
3109 goto out;
3110 }
3111 }
3113 rte_spinlock_unlock(&dev_priv->lock);
3114 return 0;
3115 out:
3116 rte_free(session->auth_key.data);
3117 rte_free(session->cipher_key.data);
3118 memset(session, 0, sizeof(dpaa_sec_session));
3119 return -1;
3120 }
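/* rte_security session_create op: dispatch on conf->protocol to the IPsec
 * or PDCP setup routine above, backing the session with an object taken
 * from the caller-supplied mempool.
 */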
3123 dpaa_sec_security_session_create(void *dev,
3124 struct rte_security_session_conf *conf,
3125 struct rte_security_session *sess,
3126 struct rte_mempool *mempool)
3128 void *sess_private_data;
3129 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3130 int ret;
3132 if (rte_mempool_get(mempool, &sess_private_data)) {
3133 DPAA_SEC_ERR("Couldn't get object from session mempool");
3134 return -ENOMEM;
3135 }
3137 switch (conf->protocol) {
3138 case RTE_SECURITY_PROTOCOL_IPSEC:
3139 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3140 sess_private_data);
3141 break;
3142 case RTE_SECURITY_PROTOCOL_PDCP:
3143 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3144 sess_private_data);
3145 break;
3146 case RTE_SECURITY_PROTOCOL_MACSEC:
3147 return -ENOTSUP;
3148 default:
3149 return -EINVAL;
3150 }
3151 if (ret != 0) {
3152 DPAA_SEC_ERR("failed to configure session parameters");
3153 /* Return session to mempool */
3154 rte_mempool_put(mempool, sess_private_data);
3155 return ret;
3156 }
3158 set_sec_session_private_data(sess, sess_private_data);
3160 return 0;
3161 }
3163 /** Clear the memory of session so it doesn't leave key material behind */
3165 dpaa_sec_security_session_destroy(void *dev,
3166 struct rte_security_session *sess)
3167 {
3168 PMD_INIT_FUNC_TRACE();
3169 void *sess_priv = get_sec_session_private_data(sess);
3170 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3172 if (sess_priv) {
3173 free_session_memory((struct rte_cryptodev *)dev, s);
3174 set_sec_session_private_data(sess, NULL);
3175 }
3176 return 0;
3177 }
3180 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3181 struct rte_cryptodev_config *config __rte_unused)
3183 PMD_INIT_FUNC_TRACE();
3189 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3191 PMD_INIT_FUNC_TRACE();
3196 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3198 PMD_INIT_FUNC_TRACE();
3202 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3204 PMD_INIT_FUNC_TRACE();
3213 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3214 struct rte_cryptodev_info *info)
3216 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3218 PMD_INIT_FUNC_TRACE();
3219 if (info != NULL) {
3220 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3221 info->feature_flags = dev->feature_flags;
3222 info->capabilities = dpaa_sec_capabilities;
3223 info->sym.max_nb_sessions = internals->max_nb_sessions;
3224 info->driver_id = dpaa_cryptodev_driver_id;
3225 }
3226 }
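/* DQRR callback used when crypto completions are delivered as
 * RTE_SCHED_TYPE_PARALLEL events: recover the op context from the frame
 * descriptor, fill the rte_event from the out-FQ's template and let the
 * entry be consumed immediately.
 */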
3228 static enum qman_cb_dqrr_result
3229 dpaa_sec_process_parallel_event(void *event,
3230 struct qman_portal *qm __always_unused,
3231 struct qman_fq *outq,
3232 const struct qm_dqrr_entry *dqrr,
3233 void **bufs)
3234 {
3235 const struct qm_fd *fd;
3236 struct dpaa_sec_job *job;
3237 struct dpaa_sec_op_ctx *ctx;
3238 struct rte_event *ev = (struct rte_event *)event;
3242 /* sg is embedded in an op ctx,
3243 * sg[0] is for output
3244 * sg[1] for input */
3245 fd = &dqrr->fd;
3246 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3248 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3249 ctx->fd_status = fd->status;
3250 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3251 struct qm_sg_entry *sg_out;
3254 sg_out = &job->sg[0];
3255 hw_sg_to_cpu(sg_out);
3256 len = sg_out->length;
3257 ctx->op->sym->m_src->pkt_len = len;
3258 ctx->op->sym->m_src->data_len = len;
3260 if (!ctx->fd_status) {
3261 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3262 } else {
3263 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3264 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3265 }
3266 ev->event_ptr = (void *)ctx->op;
3268 ev->flow_id = outq->ev.flow_id;
3269 ev->sub_event_type = outq->ev.sub_event_type;
3270 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3271 ev->op = RTE_EVENT_OP_NEW;
3272 ev->sched_type = outq->ev.sched_type;
3273 ev->queue_id = outq->ev.queue_id;
3274 ev->priority = outq->ev.priority;
3275 *bufs = (void *)ctx->op;
3277 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3279 return qman_cb_dqrr_consume;
3280 }
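/* Atomic flavour of the callback above: the DQRR slot is additionally held
 * active and its index stashed in the per-lcore table and the mbuf seqnum,
 * so the slot is only released when the application finishes the event,
 * preserving per-flow ordering.
 */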
3282 static enum qman_cb_dqrr_result
3283 dpaa_sec_process_atomic_event(void *event,
3284 struct qman_portal *qm __rte_unused,
3285 struct qman_fq *outq,
3286 const struct qm_dqrr_entry *dqrr,
3287 void **bufs)
3288 {
3289 u8 index;
3290 const struct qm_fd *fd;
3291 struct dpaa_sec_job *job;
3292 struct dpaa_sec_op_ctx *ctx;
3293 struct rte_event *ev = (struct rte_event *)event;
3297 /* sg is embedded in an op ctx,
3298 * sg[0] is for output
3299 * sg[1] for input */
3300 fd = &dqrr->fd;
3301 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3303 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3304 ctx->fd_status = fd->status;
3305 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3306 struct qm_sg_entry *sg_out;
3309 sg_out = &job->sg[0];
3310 hw_sg_to_cpu(sg_out);
3311 len = sg_out->length;
3312 ctx->op->sym->m_src->pkt_len = len;
3313 ctx->op->sym->m_src->data_len = len;
3315 if (!ctx->fd_status) {
3316 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3317 } else {
3318 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3319 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3320 }
3321 ev->event_ptr = (void *)ctx->op;
3322 ev->flow_id = outq->ev.flow_id;
3323 ev->sub_event_type = outq->ev.sub_event_type;
3324 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3325 ev->op = RTE_EVENT_OP_NEW;
3326 ev->sched_type = outq->ev.sched_type;
3327 ev->queue_id = outq->ev.queue_id;
3328 ev->priority = outq->ev.priority;
3330 /* Save active dqrr entries */
3331 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3332 DPAA_PER_LCORE_DQRR_SIZE++;
3333 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3334 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3335 ev->impl_opaque = index + 1;
3336 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3337 *bufs = (void *)ctx->op;
3339 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3341 return qman_cb_dqrr_defer;
3342 }
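/* Attach a queue pair's out-FQ to an eventdev channel; intended caller is
 * the event crypto adapter glue. A hedged sketch of the expected usage
 * (illustrative, parameter values depend on the eventdev setup):
 *
 *	struct rte_event ev = { .sched_type = RTE_SCHED_TYPE_ATOMIC };
 *	dpaa_sec_eventq_attach(cdev, qp_id, ch_id, &ev);
 *
 * Ordered scheduling is rejected; atomic and parallel pick the matching
 * DQRR callback defined above.
 */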
3345 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3346 int qp_id,
3347 uint16_t ch_id,
3348 const struct rte_event *event)
3350 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3351 struct qm_mcc_initfq opts = {0};
3352 int ret;
3355 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3356 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3357 opts.fqd.dest.channel = ch_id;
3359 switch (event->sched_type) {
3360 case RTE_SCHED_TYPE_ATOMIC:
3361 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3362 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3363 * configuration with HOLD_ACTIVE setting
3364 */
3365 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3366 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3367 break;
3368 case RTE_SCHED_TYPE_ORDERED:
3369 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3370 return -ENOTSUP;
3371 default:
3372 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3373 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3374 break;
3375 }
3377 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3378 if (unlikely(ret)) {
3379 DPAA_SEC_ERR("unable to init caam source fq!");
3380 return ret;
3381 }
3383 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3385 return 0;
3386 }
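/* Undo dpaa_sec_eventq_attach(): retire the out-FQ, take it out of
 * service and re-initialize it with the poll-mode callbacks restored.
 */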
3389 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3390 int qp_id)
3391 {
3392 struct qm_mcc_initfq opts = {0};
3393 int ret;
3394 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3396 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3397 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3398 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3399 qp->outq.cb.ern = ern_sec_fq_handler;
3400 qman_retire_fq(&qp->outq, NULL);
3401 qman_oos_fq(&qp->outq);
3402 ret = qman_init_fq(&qp->outq, 0, &opts);
3403 if (ret)
3404 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3405 qp->outq.cb.dqrr = NULL;
3407 return ret;
3408 }
3410 static struct rte_cryptodev_ops crypto_ops = {
3411 .dev_configure = dpaa_sec_dev_configure,
3412 .dev_start = dpaa_sec_dev_start,
3413 .dev_stop = dpaa_sec_dev_stop,
3414 .dev_close = dpaa_sec_dev_close,
3415 .dev_infos_get = dpaa_sec_dev_infos_get,
3416 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3417 .queue_pair_release = dpaa_sec_queue_pair_release,
3418 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3419 .sym_session_configure = dpaa_sec_sym_session_configure,
3420 .sym_session_clear = dpaa_sec_sym_session_clear,
3421 /* Raw data-path API related operations */
3422 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3423 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3426 #ifdef RTE_LIB_SECURITY
3427 static const struct rte_security_capability *
3428 dpaa_sec_capabilities_get(void *device __rte_unused)
3430 return dpaa_sec_security_cap;
3433 static const struct rte_security_ops dpaa_sec_security_ops = {
3434 .session_create = dpaa_sec_security_session_create,
3435 .session_update = NULL,
3436 .session_stats_get = NULL,
3437 .session_destroy = dpaa_sec_security_session_destroy,
3438 .set_pkt_metadata = NULL,
3439 .capabilities_get = dpaa_sec_capabilities_get
3443 dpaa_sec_uninit(struct rte_cryptodev *dev)
3445 struct dpaa_sec_dev_private *internals;
3447 if (dev == NULL)
3448 return -ENODEV;
3450 internals = dev->data->dev_private;
3451 rte_free(dev->security_ctx);
3453 rte_free(internals);
3455 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3456 dev->data->name, rte_socket_id());
3458 return 0;
3459 }
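/* One-time device initialization: set burst ops and feature flags, create
 * the rte_security context (primary process only), then initialize the
 * per-queue-pair Tx FQs and the shared pool of Rx FQs toward CAAM.
 */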
3462 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3464 struct dpaa_sec_dev_private *internals;
3465 #ifdef RTE_LIB_SECURITY
3466 struct rte_security_ctx *security_instance;
3467 #endif
3468 struct dpaa_sec_qp *qp;
3469 uint32_t i, flags;
3470 int ret;
3472 PMD_INIT_FUNC_TRACE();
3474 cryptodev->driver_id = dpaa_cryptodev_driver_id;
3475 cryptodev->dev_ops = &crypto_ops;
3477 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3478 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3479 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3480 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3481 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3482 RTE_CRYPTODEV_FF_SECURITY |
3483 RTE_CRYPTODEV_FF_SYM_RAW_DP |
3484 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3485 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3486 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3487 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3488 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3490 internals = cryptodev->data->dev_private;
3491 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3492 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3494 /*
3495 * For secondary processes, we don't initialise any further as primary
3496 * has already done this work. Only check we don't need a different
3497 * RX function
3498 */
3499 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3500 DPAA_SEC_WARN("Device already init by primary process");
3501 return 0;
3502 }
3503 #ifdef RTE_LIB_SECURITY
3504 /* Initialize security_ctx only for primary process*/
3505 security_instance = rte_malloc("rte_security_instances_ops",
3506 sizeof(struct rte_security_ctx), 0);
3507 if (security_instance == NULL)
3508 return -ENOMEM;
3509 security_instance->device = (void *)cryptodev;
3510 security_instance->ops = &dpaa_sec_security_ops;
3511 security_instance->sess_cnt = 0;
3512 cryptodev->security_ctx = security_instance;
3513 #endif
3514 rte_spinlock_init(&internals->lock);
3515 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3516 /* init qman fq for queue pair */
3517 qp = &internals->qps[i];
3518 ret = dpaa_sec_init_tx(&qp->outq);
3519 if (ret) {
3520 DPAA_SEC_ERR("config tx of queue pair %d", i);
3521 goto init_error;
3522 }
3523 }
3525 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3526 QMAN_FQ_FLAG_TO_DCPORTAL;
3527 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3528 /* create rx qman fq for sessions*/
3529 ret = qman_create_fq(0, flags, &internals->inq[i]);
3530 if (unlikely(ret != 0)) {
3531 DPAA_SEC_ERR("sec qman_create_fq failed");
3532 goto init_error;
3533 }
3534 }
3536 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3537 return 0;
3539 init_error:
3540 DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3542 rte_free(cryptodev->security_ctx);
3543 return -EFAULT;
3544 }
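/* Bus probe: allocate the cryptodev, read the SEC era from the device tree
 * when RTA has not been configured yet, make sure the calling thread owns
 * a QMan portal, then run dpaa_sec_dev_init().
 */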
3547 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3548 struct rte_dpaa_device *dpaa_dev)
3550 struct rte_cryptodev *cryptodev;
3551 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3552 int retval;
3555 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3557 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3558 if (cryptodev == NULL)
3559 return -ENOMEM;
3561 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3562 cryptodev->data->dev_private = rte_zmalloc_socket(
3563 "cryptodev private structure",
3564 sizeof(struct dpaa_sec_dev_private),
3565 RTE_CACHE_LINE_SIZE,
3568 if (cryptodev->data->dev_private == NULL)
3569 rte_panic("Cannot allocate memzone for private "
3570 "device data");
3571 }
3573 dpaa_dev->crypto_dev = cryptodev;
3574 cryptodev->device = &dpaa_dev->device;
3576 /* init user callbacks */
3577 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3579 /* if sec device version is not configured */
3580 if (!rta_get_sec_era()) {
3581 const struct device_node *caam_node;
3583 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3584 const uint32_t *prop = of_get_property(caam_node,
3585 "fsl,sec-era",
3586 NULL);
3587 if (prop) {
3588 rta_set_sec_era(
3589 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3590 break;
3591 }
3592 }
3593 }
3595 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3596 retval = rte_dpaa_portal_init((void *)1);
3597 if (retval) {
3598 DPAA_SEC_ERR("Unable to initialize portal");
3599 goto out;
3600 }
3601 }
3603 /* Invoke PMD device initialization function */
3604 retval = dpaa_sec_dev_init(cryptodev);
3605 if (retval == 0) {
3606 rte_cryptodev_pmd_probing_finish(cryptodev);
3607 return 0;
3608 }
3610 retval = -ENXIO;
3611 out:
3612 /* In case of error, cleanup is done */
3613 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3614 rte_free(cryptodev->data->dev_private);
3616 rte_cryptodev_pmd_release_device(cryptodev);
3618 return retval;
3619 }
3622 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3624 struct rte_cryptodev *cryptodev;
3625 int ret;
3627 cryptodev = dpaa_dev->crypto_dev;
3628 if (cryptodev == NULL)
3629 return -ENODEV;
3631 ret = dpaa_sec_uninit(cryptodev);
3632 if (ret)
3633 return ret;
3635 return rte_cryptodev_pmd_destroy(cryptodev);
3636 }
3638 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3639 .drv_type = FSL_DPAA_CRYPTO,
3641 .name = "DPAA SEC PMD",
3643 .probe = cryptodev_dpaa_sec_probe,
3644 .remove = cryptodev_dpaa_sec_remove,
3645 };
3647 static struct cryptodev_driver dpaa_sec_crypto_drv;
3649 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3650 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3651 dpaa_cryptodev_driver_id);
3652 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);