1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2021 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
42 #include <rte_dpaa_bus.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
48 static uint8_t cryptodev_driver_id;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
67 struct dpaa_sec_op_ctx *ctx;
70 retval = rte_mempool_get(
71 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
74 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78 * Clear SG memory. There are 16 SG entries of 16 bytes each.
79 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
80 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
81 * each packet, so memset() would be costlier than dcbz_64().
83 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84 dcbz_64(&ctx->job.sg[i]);
86 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
95 const struct qm_mr_entry *msg)
97 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98 fq->fqid, msg->ern.rc, msg->ern.seqnum);
101 /* Initialize the queue with the CAAM channel as destination so that
102 * all packets enqueued on this queue are dispatched to CAAM
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
108 struct qm_mcc_initfq fq_opts;
112 /* Clear FQ options */
113 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
115 flags = QMAN_INITFQ_FLAG_SCHED;
116 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117 QM_INITFQ_WE_CONTEXTB;
119 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120 fq_opts.fqd.context_b = fqid_out;
121 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122 fq_opts.fqd.dest.wq = 0;
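/*
 * Note: context_a carries the IOVA of the session's shared descriptor
 * (CDB) that SEC executes for each frame, and context_b names the FQ
 * on which CAAM enqueues the results (see dpaa_sec_attach_sess_q()).
 */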
124 fq_in->cb.ern = ern_sec_fq_handler;
126 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
128 ret = qman_init_fq(fq_in, flags, &fq_opts);
129 if (unlikely(ret != 0))
130 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
135 /* frames enqueued on in_fq are processed by CAAM, which puts the crypto result into out_fq */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138 struct qman_fq *fq __always_unused,
139 const struct qm_dqrr_entry *dqrr)
141 const struct qm_fd *fd;
142 struct dpaa_sec_job *job;
143 struct dpaa_sec_op_ctx *ctx;
145 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146 return qman_cb_dqrr_defer;
148 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149 return qman_cb_dqrr_consume;
152 /* sg is embedded in an op ctx,
153 * sg[0] is for output, sg[1] is for input
156 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
158 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159 ctx->fd_status = fd->status;
160 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161 struct qm_sg_entry *sg_out;
163 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164 ctx->op->sym->m_src : ctx->op->sym->m_dst;
166 sg_out = &job->sg[0];
167 hw_sg_to_cpu(sg_out);
168 len = sg_out->length;
170 while (mbuf->next != NULL) {
171 len -= mbuf->data_len;
174 mbuf->data_len = len;
176 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177 dpaa_sec_op_ending(ctx);
179 return qman_cb_dqrr_consume;
182 /* CAAM results are put into this queue */
184 dpaa_sec_init_tx(struct qman_fq *fq)
187 struct qm_mcc_initfq opts;
190 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191 QMAN_FQ_FLAG_DYNAMIC_FQID;
193 ret = qman_create_fq(0, flags, fq);
195 DPAA_SEC_ERR("qman_create_fq failed");
199 memset(&opts, 0, sizeof(opts));
200 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
203 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
205 fq->cb.dqrr = dqrr_out_fq_cb_rx;
206 fq->cb.ern = ern_sec_fq_handler;
208 ret = qman_init_fq(fq, 0, &opts);
210 DPAA_SEC_ERR("unable to init caam source fq!");
217 static inline int is_aead(dpaa_sec_session *ses)
219 return ((ses->cipher_alg == 0) &&
220 (ses->auth_alg == 0) &&
221 (ses->aead_alg != 0));
224 static inline int is_encode(dpaa_sec_session *ses)
226 return ses->dir == DIR_ENC;
229 static inline int is_decode(dpaa_sec_session *ses)
231 return ses->dir == DIR_DEC;
234 #ifdef RTE_LIB_SECURITY
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
238 struct alginfo authdata = {0}, cipherdata = {0};
239 struct sec_cdb *cdb = &ses->cdb;
240 struct alginfo *p_authdata = NULL;
241 int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
248 cipherdata.key = (size_t)ses->cipher_key.data;
249 cipherdata.keylen = ses->cipher_key.length;
250 cipherdata.key_enc_flags = 0;
251 cipherdata.key_type = RTA_DATA_IMM;
252 cipherdata.algtype = ses->cipher_key.alg;
253 cipherdata.algmode = ses->cipher_key.algmode;
256 authdata.key = (size_t)ses->auth_key.data;
257 authdata.keylen = ses->auth_key.length;
258 authdata.key_enc_flags = 0;
259 authdata.key_type = RTA_DATA_IMM;
260 authdata.algtype = ses->auth_key.alg;
261 authdata.algmode = ses->auth_key.algmode;
263 p_authdata = &authdata;
266 if (ses->pdcp.sdap_enabled) {
267 int nb_keys_to_inline =
268 rta_inline_pdcp_sdap_query(authdata.algtype,
272 if (nb_keys_to_inline >= 1) {
273 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
274 (size_t)cipherdata.key);
275 cipherdata.key_type = RTA_DATA_PTR;
277 if (nb_keys_to_inline >= 2) {
278 authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
279 (size_t)authdata.key);
280 authdata.key_type = RTA_DATA_PTR;
283 if (rta_inline_pdcp_query(authdata.algtype,
286 ses->pdcp.hfn_ovd)) {
287 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
288 (size_t)cipherdata.key);
289 cipherdata.key_type = RTA_DATA_PTR;
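/*
 * Descriptor space is limited: keys that cannot be inlined are
 * referenced by IOVA (RTA_DATA_PTR) instead of being embedded
 * immediately (RTA_DATA_IMM) in the shared descriptor.
 */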
293 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
294 if (ses->dir == DIR_ENC)
295 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
296 cdb->sh_desc, 1, swap,
301 ses->pdcp.hfn_threshold,
302 &cipherdata, &authdata,
304 else if (ses->dir == DIR_DEC)
305 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
306 cdb->sh_desc, 1, swap,
311 ses->pdcp.hfn_threshold,
312 &cipherdata, &authdata,
314 } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
315 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
318 if (ses->dir == DIR_ENC) {
319 if (ses->pdcp.sdap_enabled)
321 cnstr_shdsc_pdcp_sdap_u_plane_encap(
322 cdb->sh_desc, 1, swap,
327 ses->pdcp.hfn_threshold,
328 &cipherdata, p_authdata, 0);
331 cnstr_shdsc_pdcp_u_plane_encap(
332 cdb->sh_desc, 1, swap,
337 ses->pdcp.hfn_threshold,
338 &cipherdata, p_authdata, 0);
339 } else if (ses->dir == DIR_DEC) {
340 if (ses->pdcp.sdap_enabled)
342 cnstr_shdsc_pdcp_sdap_u_plane_decap(
343 cdb->sh_desc, 1, swap,
348 ses->pdcp.hfn_threshold,
349 &cipherdata, p_authdata, 0);
352 cnstr_shdsc_pdcp_u_plane_decap(
353 cdb->sh_desc, 1, swap,
358 ses->pdcp.hfn_threshold,
359 &cipherdata, p_authdata, 0);
362 return shared_desc_len;
365 /* prepare ipsec proto command block of the session */
367 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
369 struct alginfo cipherdata = {0}, authdata = {0};
370 struct sec_cdb *cdb = &ses->cdb;
371 int32_t shared_desc_len = 0;
373 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
379 cipherdata.key = (size_t)ses->cipher_key.data;
380 cipherdata.keylen = ses->cipher_key.length;
381 cipherdata.key_enc_flags = 0;
382 cipherdata.key_type = RTA_DATA_IMM;
383 cipherdata.algtype = ses->cipher_key.alg;
384 cipherdata.algmode = ses->cipher_key.algmode;
386 if (ses->auth_key.length) {
387 authdata.key = (size_t)ses->auth_key.data;
388 authdata.keylen = ses->auth_key.length;
389 authdata.key_enc_flags = 0;
390 authdata.key_type = RTA_DATA_IMM;
391 authdata.algtype = ses->auth_key.alg;
392 authdata.algmode = ses->auth_key.algmode;
395 cdb->sh_desc[0] = cipherdata.keylen;
396 cdb->sh_desc[1] = authdata.keylen;
397 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
399 (unsigned int *)cdb->sh_desc,
400 &cdb->sh_desc[2], 2);
403 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
406 if (cdb->sh_desc[2] & 1)
407 cipherdata.key_type = RTA_DATA_IMM;
409 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
410 (void *)(size_t)cipherdata.key);
411 cipherdata.key_type = RTA_DATA_PTR;
413 if (cdb->sh_desc[2] & (1<<1))
414 authdata.key_type = RTA_DATA_IMM;
416 authdata.key = (size_t)rte_dpaa_mem_vtop(
417 (void *)(size_t)authdata.key);
418 authdata.key_type = RTA_DATA_PTR;
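/*
 * rta_inline_query() reports, via bits in sh_desc[2], which keys fit
 * inline in the descriptor: bit 0 covers the cipher key and bit 1 the
 * auth key; a key that does not fit is passed by pointer instead.
 */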
424 if (ses->dir == DIR_ENC) {
425 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
427 true, swap, SHR_SERIAL,
429 (uint8_t *)&ses->ip4_hdr,
430 &cipherdata, &authdata);
431 } else if (ses->dir == DIR_DEC) {
432 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
434 true, swap, SHR_SERIAL,
436 &cipherdata, &authdata);
438 return shared_desc_len;
441 /* prepare command block of the session */
443 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
445 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
446 int32_t shared_desc_len = 0;
447 struct sec_cdb *cdb = &ses->cdb;
449 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
455 memset(cdb, 0, sizeof(struct sec_cdb));
458 #ifdef RTE_LIB_SECURITY
460 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
463 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
466 case DPAA_SEC_CIPHER:
467 alginfo_c.key = (size_t)ses->cipher_key.data;
468 alginfo_c.keylen = ses->cipher_key.length;
469 alginfo_c.key_enc_flags = 0;
470 alginfo_c.key_type = RTA_DATA_IMM;
471 alginfo_c.algtype = ses->cipher_key.alg;
472 alginfo_c.algmode = ses->cipher_key.algmode;
474 switch (ses->cipher_alg) {
475 case RTE_CRYPTO_CIPHER_AES_CBC:
476 case RTE_CRYPTO_CIPHER_3DES_CBC:
477 case RTE_CRYPTO_CIPHER_DES_CBC:
478 case RTE_CRYPTO_CIPHER_AES_CTR:
479 case RTE_CRYPTO_CIPHER_3DES_CTR:
480 shared_desc_len = cnstr_shdsc_blkcipher(
482 swap, SHR_NEVER, &alginfo_c,
486 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
487 shared_desc_len = cnstr_shdsc_snow_f8(
488 cdb->sh_desc, true, swap,
492 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
493 shared_desc_len = cnstr_shdsc_zuce(
494 cdb->sh_desc, true, swap,
499 DPAA_SEC_ERR("unsupported cipher alg %d",
505 alginfo_a.key = (size_t)ses->auth_key.data;
506 alginfo_a.keylen = ses->auth_key.length;
507 alginfo_a.key_enc_flags = 0;
508 alginfo_a.key_type = RTA_DATA_IMM;
509 alginfo_a.algtype = ses->auth_key.alg;
510 alginfo_a.algmode = ses->auth_key.algmode;
511 switch (ses->auth_alg) {
512 case RTE_CRYPTO_AUTH_MD5:
513 case RTE_CRYPTO_AUTH_SHA1:
514 case RTE_CRYPTO_AUTH_SHA224:
515 case RTE_CRYPTO_AUTH_SHA256:
516 case RTE_CRYPTO_AUTH_SHA384:
517 case RTE_CRYPTO_AUTH_SHA512:
518 shared_desc_len = cnstr_shdsc_hash(
520 swap, SHR_NEVER, &alginfo_a,
524 case RTE_CRYPTO_AUTH_MD5_HMAC:
525 case RTE_CRYPTO_AUTH_SHA1_HMAC:
526 case RTE_CRYPTO_AUTH_SHA224_HMAC:
527 case RTE_CRYPTO_AUTH_SHA256_HMAC:
528 case RTE_CRYPTO_AUTH_SHA384_HMAC:
529 case RTE_CRYPTO_AUTH_SHA512_HMAC:
530 shared_desc_len = cnstr_shdsc_hmac(
532 swap, SHR_NEVER, &alginfo_a,
536 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
537 shared_desc_len = cnstr_shdsc_snow_f9(
538 cdb->sh_desc, true, swap,
543 case RTE_CRYPTO_AUTH_ZUC_EIA3:
544 shared_desc_len = cnstr_shdsc_zuca(
545 cdb->sh_desc, true, swap,
550 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
551 case RTE_CRYPTO_AUTH_AES_CMAC:
552 shared_desc_len = cnstr_shdsc_aes_mac(
554 true, swap, SHR_NEVER,
560 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
564 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
565 DPAA_SEC_ERR("not supported aead alg");
568 alginfo.key = (size_t)ses->aead_key.data;
569 alginfo.keylen = ses->aead_key.length;
570 alginfo.key_enc_flags = 0;
571 alginfo.key_type = RTA_DATA_IMM;
572 alginfo.algtype = ses->aead_key.alg;
573 alginfo.algmode = ses->aead_key.algmode;
575 if (ses->dir == DIR_ENC)
576 shared_desc_len = cnstr_shdsc_gcm_encap(
577 cdb->sh_desc, true, swap, SHR_NEVER,
582 shared_desc_len = cnstr_shdsc_gcm_decap(
583 cdb->sh_desc, true, swap, SHR_NEVER,
588 case DPAA_SEC_CIPHER_HASH:
589 alginfo_c.key = (size_t)ses->cipher_key.data;
590 alginfo_c.keylen = ses->cipher_key.length;
591 alginfo_c.key_enc_flags = 0;
592 alginfo_c.key_type = RTA_DATA_IMM;
593 alginfo_c.algtype = ses->cipher_key.alg;
594 alginfo_c.algmode = ses->cipher_key.algmode;
596 alginfo_a.key = (size_t)ses->auth_key.data;
597 alginfo_a.keylen = ses->auth_key.length;
598 alginfo_a.key_enc_flags = 0;
599 alginfo_a.key_type = RTA_DATA_IMM;
600 alginfo_a.algtype = ses->auth_key.alg;
601 alginfo_a.algmode = ses->auth_key.algmode;
603 cdb->sh_desc[0] = alginfo_c.keylen;
604 cdb->sh_desc[1] = alginfo_a.keylen;
605 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
607 (unsigned int *)cdb->sh_desc,
608 &cdb->sh_desc[2], 2);
611 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
614 if (cdb->sh_desc[2] & 1)
615 alginfo_c.key_type = RTA_DATA_IMM;
617 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
618 (void *)(size_t)alginfo_c.key);
619 alginfo_c.key_type = RTA_DATA_PTR;
621 if (cdb->sh_desc[2] & (1<<1))
622 alginfo_a.key_type = RTA_DATA_IMM;
624 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
625 (void *)(size_t)alginfo_a.key);
626 alginfo_a.key_type = RTA_DATA_PTR;
631 /* Auth_only_len is set as 0 here and it will be
632 * overwritten in fd for each packet. */
634 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
635 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
637 ses->digest_length, ses->dir);
639 case DPAA_SEC_HASH_CIPHER:
641 DPAA_SEC_ERR("error: Unsupported session");
645 if (shared_desc_len < 0) {
646 DPAA_SEC_ERR("error in preparing command block");
647 return shared_desc_len;
650 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
651 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
652 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
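/*
 * The CDB header words above are byte-swapped once at session setup:
 * SEC parses the header in big-endian, and swapping here keeps the
 * conversion off the per-packet path.
 */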
657 /* qp is lockless, should be accessed by only one thread */
659 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
662 unsigned int pkts = 0;
663 int num_rx_bufs, ret;
664 struct qm_dqrr_entry *dq;
665 uint32_t vdqcr_flags = 0;
669 * For requests of fewer than four buffers, QM_VDQCR_EXACT is set and
670 * exactly the requested number of buffers is provided.
671 * Otherwise the flag is left clear; an inexact dequeue can return up
672 * to two more buffers than requested, so we request two fewer.
675 vdqcr_flags = QM_VDQCR_EXACT;
676 num_rx_bufs = nb_ops;
678 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
679 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
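/*
 * For example, a request for 32 ops issues a volatile dequeue of 30
 * frames; with up to 2 extra frames possible, no more than the 32
 * requested are returned.
 */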
681 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
686 const struct qm_fd *fd;
687 struct dpaa_sec_job *job;
688 struct dpaa_sec_op_ctx *ctx;
689 struct rte_crypto_op *op;
691 dq = qman_dequeue(fq);
696 /* sg is embedded in an op ctx,
697 * sg[0] is for output, sg[1] is for input
700 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
702 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
703 ctx->fd_status = fd->status;
705 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
706 struct qm_sg_entry *sg_out;
708 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
709 op->sym->m_src : op->sym->m_dst;
711 sg_out = &job->sg[0];
712 hw_sg_to_cpu(sg_out);
713 len = sg_out->length;
715 while (mbuf->next != NULL) {
716 len -= mbuf->data_len;
719 mbuf->data_len = len;
721 if (!ctx->fd_status) {
722 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
724 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
725 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
729 /* report op status to sym->op and then free the ctx memory */
730 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
732 qman_dqrr_consume(fq, dq);
733 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
738 static inline struct dpaa_sec_job *
739 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
741 struct rte_crypto_sym_op *sym = op->sym;
742 struct rte_mbuf *mbuf = sym->m_src;
743 struct dpaa_sec_job *cf;
744 struct dpaa_sec_op_ctx *ctx;
745 struct qm_sg_entry *sg, *out_sg, *in_sg;
746 phys_addr_t start_addr;
747 uint8_t *old_digest, extra_segs;
748 int data_len, data_offset;
750 data_len = sym->auth.data.length;
751 data_offset = sym->auth.data.offset;
753 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
754 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
755 if ((data_len & 7) || (data_offset & 7)) {
756 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
760 data_len = data_len >> 3;
761 data_offset = data_offset >> 3;
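/*
 * SNOW3G/ZUC auth lengths are given in bits; once verified to be
 * byte-aligned they are converted to bytes for the SG entries.
 */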
769 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
770 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
774 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
780 old_digest = ctx->digest;
784 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
785 out_sg->length = ses->digest_length;
786 cpu_to_hw_sg(out_sg);
790 /* need to extend the input to a compound frame */
791 in_sg->extension = 1;
793 in_sg->length = data_len;
794 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
799 if (ses->iv.length) {
802 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
805 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
806 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
808 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
809 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
812 sg->length = ses->iv.length;
814 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
815 in_sg->length += sg->length;
820 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
821 sg->offset = data_offset;
823 if (data_len <= (mbuf->data_len - data_offset)) {
824 sg->length = data_len;
826 sg->length = mbuf->data_len - data_offset;
828 /* remaining i/p segs */
829 while ((data_len = data_len - sg->length) &&
830 (mbuf = mbuf->next)) {
833 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
834 if (data_len > mbuf->data_len)
835 sg->length = mbuf->data_len;
837 sg->length = data_len;
841 if (is_decode(ses)) {
842 /* Digest verification case */
845 rte_memcpy(old_digest, sym->auth.digest.data,
847 start_addr = rte_dpaa_mem_vtop(old_digest);
848 qm_sg_entry_set64(sg, start_addr);
849 sg->length = ses->digest_length;
850 in_sg->length += ses->digest_length;
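/*
 * For verification the received ICV is appended as the last input SG
 * entry; SEC recomputes the digest, and a mismatch surfaces as a
 * non-zero FD status.
 */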
861 *           |<-----data_len------>|
862 * |ip_header|ah_header|icv|payload|
867 static inline struct dpaa_sec_job *
868 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
870 struct rte_crypto_sym_op *sym = op->sym;
871 struct rte_mbuf *mbuf = sym->m_src;
872 struct dpaa_sec_job *cf;
873 struct dpaa_sec_op_ctx *ctx;
874 struct qm_sg_entry *sg, *in_sg;
875 rte_iova_t start_addr;
877 int data_len, data_offset;
879 data_len = sym->auth.data.length;
880 data_offset = sym->auth.data.offset;
882 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
883 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
884 if ((data_len & 7) || (data_offset & 7)) {
885 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
889 data_len = data_len >> 3;
890 data_offset = data_offset >> 3;
893 ctx = dpaa_sec_alloc_ctx(ses, 4);
899 old_digest = ctx->digest;
901 start_addr = rte_pktmbuf_iova(mbuf);
904 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
905 sg->length = ses->digest_length;
910 /* need to extend the input to a compound frame */
911 in_sg->extension = 1;
913 in_sg->length = data_len;
914 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
917 if (ses->iv.length) {
920 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
923 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
924 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
926 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
927 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
930 sg->length = ses->iv.length;
932 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
933 in_sg->length += sg->length;
938 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
939 sg->offset = data_offset;
940 sg->length = data_len;
942 if (is_decode(ses)) {
943 /* Digest verification case */
945 /* hash result or digest, save digest first */
946 rte_memcpy(old_digest, sym->auth.digest.data,
948 /* let's check digest by hw */
949 start_addr = rte_dpaa_mem_vtop(old_digest);
951 qm_sg_entry_set64(sg, start_addr);
952 sg->length = ses->digest_length;
953 in_sg->length += ses->digest_length;
962 static inline struct dpaa_sec_job *
963 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
965 struct rte_crypto_sym_op *sym = op->sym;
966 struct dpaa_sec_job *cf;
967 struct dpaa_sec_op_ctx *ctx;
968 struct qm_sg_entry *sg, *out_sg, *in_sg;
969 struct rte_mbuf *mbuf;
971 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
973 int data_len, data_offset;
975 data_len = sym->cipher.data.length;
976 data_offset = sym->cipher.data.offset;
978 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
979 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
980 if ((data_len & 7) || (data_offset & 7)) {
981 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
985 data_len = data_len >> 3;
986 data_offset = data_offset >> 3;
991 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
994 req_segs = mbuf->nb_segs * 2 + 3;
996 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
997 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1002 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1010 out_sg = &cf->sg[0];
1011 out_sg->extension = 1;
1012 out_sg->length = data_len;
1013 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1014 cpu_to_hw_sg(out_sg);
1018 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1019 sg->length = mbuf->data_len - data_offset;
1020 sg->offset = data_offset;
1022 /* Successive segs */
1027 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1028 sg->length = mbuf->data_len;
1037 in_sg->extension = 1;
1039 in_sg->length = data_len + ses->iv.length;
1042 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1043 cpu_to_hw_sg(in_sg);
1046 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1047 sg->length = ses->iv.length;
1052 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1053 sg->length = mbuf->data_len - data_offset;
1054 sg->offset = data_offset;
1056 /* Successive segs */
1061 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1062 sg->length = mbuf->data_len;
1071 static inline struct dpaa_sec_job *
1072 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1074 struct rte_crypto_sym_op *sym = op->sym;
1075 struct dpaa_sec_job *cf;
1076 struct dpaa_sec_op_ctx *ctx;
1077 struct qm_sg_entry *sg;
1078 rte_iova_t src_start_addr, dst_start_addr;
1079 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1081 int data_len, data_offset;
1083 data_len = sym->cipher.data.length;
1084 data_offset = sym->cipher.data.offset;
1086 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1087 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1088 if ((data_len & 7) || (data_offset & 7)) {
1089 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1093 data_len = data_len >> 3;
1094 data_offset = data_offset >> 3;
1097 ctx = dpaa_sec_alloc_ctx(ses, 4);
1104 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1107 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1109 dst_start_addr = src_start_addr;
1113 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1114 sg->length = data_len + ses->iv.length;
1120 /* need to extend the input to a compound frame */
1123 sg->length = data_len + ses->iv.length;
1124 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1128 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1129 sg->length = ses->iv.length;
1133 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1134 sg->length = data_len;
1141 static inline struct dpaa_sec_job *
1142 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1144 struct rte_crypto_sym_op *sym = op->sym;
1145 struct dpaa_sec_job *cf;
1146 struct dpaa_sec_op_ctx *ctx;
1147 struct qm_sg_entry *sg, *out_sg, *in_sg;
1148 struct rte_mbuf *mbuf;
1150 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1155 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1158 req_segs = mbuf->nb_segs * 2 + 4;
1161 if (ses->auth_only_len)
1164 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1165 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1170 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1177 rte_prefetch0(cf->sg);
1180 out_sg = &cf->sg[0];
1181 out_sg->extension = 1;
1183 out_sg->length = sym->aead.data.length + ses->digest_length;
1185 out_sg->length = sym->aead.data.length;
1187 /* output sg entries */
1189 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1190 cpu_to_hw_sg(out_sg);
1193 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1194 sg->length = mbuf->data_len - sym->aead.data.offset;
1195 sg->offset = sym->aead.data.offset;
1197 /* Successive segs */
1202 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1203 sg->length = mbuf->data_len;
1206 sg->length -= ses->digest_length;
1208 if (is_encode(ses)) {
1210 /* set auth output */
1212 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1213 sg->length = ses->digest_length;
1221 in_sg->extension = 1;
1224 in_sg->length = ses->iv.length + sym->aead.data.length
1225 + ses->auth_only_len;
1227 in_sg->length = ses->iv.length + sym->aead.data.length
1228 + ses->auth_only_len + ses->digest_length;
1230 /* input sg entries */
1232 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1233 cpu_to_hw_sg(in_sg);
1236 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1237 sg->length = ses->iv.length;
1240 /* 2nd seg auth only */
1241 if (ses->auth_only_len) {
1243 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1244 sg->length = ses->auth_only_len;
1250 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1251 sg->length = mbuf->data_len - sym->aead.data.offset;
1252 sg->offset = sym->aead.data.offset;
1254 /* Successive segs */
1259 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1260 sg->length = mbuf->data_len;
1264 if (is_decode(ses)) {
1267 memcpy(ctx->digest, sym->aead.digest.data,
1268 ses->digest_length);
1269 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1270 sg->length = ses->digest_length;
1278 static inline struct dpaa_sec_job *
1279 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1281 struct rte_crypto_sym_op *sym = op->sym;
1282 struct dpaa_sec_job *cf;
1283 struct dpaa_sec_op_ctx *ctx;
1284 struct qm_sg_entry *sg;
1285 uint32_t length = 0;
1286 rte_iova_t src_start_addr, dst_start_addr;
1287 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1290 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1293 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1295 dst_start_addr = src_start_addr;
1297 ctx = dpaa_sec_alloc_ctx(ses, 7);
1305 rte_prefetch0(cf->sg);
1307 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1308 if (is_encode(ses)) {
1309 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1310 sg->length = ses->iv.length;
1311 length += sg->length;
1315 if (ses->auth_only_len) {
1316 qm_sg_entry_set64(sg,
1317 rte_dpaa_mem_vtop(sym->aead.aad.data));
1318 sg->length = ses->auth_only_len;
1319 length += sg->length;
1323 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1324 sg->length = sym->aead.data.length;
1325 length += sg->length;
1329 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1330 sg->length = ses->iv.length;
1331 length += sg->length;
1335 if (ses->auth_only_len) {
1336 qm_sg_entry_set64(sg,
1337 rte_dpaa_mem_vtop(sym->aead.aad.data));
1338 sg->length = ses->auth_only_len;
1339 length += sg->length;
1343 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1344 sg->length = sym->aead.data.length;
1345 length += sg->length;
1348 memcpy(ctx->digest, sym->aead.digest.data,
1349 ses->digest_length);
1352 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1353 sg->length = ses->digest_length;
1354 length += sg->length;
1358 /* input compound frame */
1359 cf->sg[1].length = length;
1360 cf->sg[1].extension = 1;
1361 cf->sg[1].final = 1;
1362 cpu_to_hw_sg(&cf->sg[1]);
1366 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1367 qm_sg_entry_set64(sg,
1368 dst_start_addr + sym->aead.data.offset);
1369 sg->length = sym->aead.data.length;
1370 length = sg->length;
1371 if (is_encode(ses)) {
1373 /* set auth output */
1375 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1376 sg->length = ses->digest_length;
1377 length += sg->length;
1382 /* output compound frame */
1383 cf->sg[0].length = length;
1384 cf->sg[0].extension = 1;
1385 cpu_to_hw_sg(&cf->sg[0]);
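/*
 * cf->sg[0] and cf->sg[1] now describe the output and input sides of
 * the compound frame handed to SEC in the FD.
 */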
1390 static inline struct dpaa_sec_job *
1391 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1393 struct rte_crypto_sym_op *sym = op->sym;
1394 struct dpaa_sec_job *cf;
1395 struct dpaa_sec_op_ctx *ctx;
1396 struct qm_sg_entry *sg, *out_sg, *in_sg;
1397 struct rte_mbuf *mbuf;
1399 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1404 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1407 req_segs = mbuf->nb_segs * 2 + 4;
1410 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1411 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1416 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1423 rte_prefetch0(cf->sg);
1426 out_sg = &cf->sg[0];
1427 out_sg->extension = 1;
1429 out_sg->length = sym->auth.data.length + ses->digest_length;
1431 out_sg->length = sym->auth.data.length;
1433 /* output sg entries */
1435 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1436 cpu_to_hw_sg(out_sg);
1439 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1440 sg->length = mbuf->data_len - sym->auth.data.offset;
1441 sg->offset = sym->auth.data.offset;
1443 /* Successive segs */
1448 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1449 sg->length = mbuf->data_len;
1452 sg->length -= ses->digest_length;
1454 if (is_encode(ses)) {
1456 /* set auth output */
1458 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1459 sg->length = ses->digest_length;
1467 in_sg->extension = 1;
1470 in_sg->length = ses->iv.length + sym->auth.data.length;
1472 in_sg->length = ses->iv.length + sym->auth.data.length
1473 + ses->digest_length;
1475 /* input sg entries */
1477 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1478 cpu_to_hw_sg(in_sg);
1481 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1482 sg->length = ses->iv.length;
1487 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1488 sg->length = mbuf->data_len - sym->auth.data.offset;
1489 sg->offset = sym->auth.data.offset;
1491 /* Successive segs */
1496 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1497 sg->length = mbuf->data_len;
1501 sg->length -= ses->digest_length;
1502 if (is_decode(ses)) {
1505 memcpy(ctx->digest, sym->auth.digest.data,
1506 ses->digest_length);
1507 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1508 sg->length = ses->digest_length;
1516 static inline struct dpaa_sec_job *
1517 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1519 struct rte_crypto_sym_op *sym = op->sym;
1520 struct dpaa_sec_job *cf;
1521 struct dpaa_sec_op_ctx *ctx;
1522 struct qm_sg_entry *sg;
1523 rte_iova_t src_start_addr, dst_start_addr;
1524 uint32_t length = 0;
1525 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1528 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1530 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1532 dst_start_addr = src_start_addr;
1534 ctx = dpaa_sec_alloc_ctx(ses, 7);
1542 rte_prefetch0(cf->sg);
1544 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1545 if (is_encode(ses)) {
1546 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1547 sg->length = ses->iv.length;
1548 length += sg->length;
1552 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1553 sg->length = sym->auth.data.length;
1554 length += sg->length;
1558 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1559 sg->length = ses->iv.length;
1560 length += sg->length;
1565 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1566 sg->length = sym->auth.data.length;
1567 length += sg->length;
1570 memcpy(ctx->digest, sym->auth.digest.data,
1571 ses->digest_length);
1574 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1575 sg->length = ses->digest_length;
1576 length += sg->length;
1580 /* input compound frame */
1581 cf->sg[1].length = length;
1582 cf->sg[1].extension = 1;
1583 cf->sg[1].final = 1;
1584 cpu_to_hw_sg(&cf->sg[1]);
1588 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1589 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1590 sg->length = sym->cipher.data.length;
1591 length = sg->length;
1592 if (is_encode(ses)) {
1594 /* set auth output */
1596 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1597 sg->length = ses->digest_length;
1598 length += sg->length;
1603 /* output compound frame */
1604 cf->sg[0].length = length;
1605 cf->sg[0].extension = 1;
1606 cpu_to_hw_sg(&cf->sg[0]);
1611 #ifdef RTE_LIB_SECURITY
1612 static inline struct dpaa_sec_job *
1613 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1615 struct rte_crypto_sym_op *sym = op->sym;
1616 struct dpaa_sec_job *cf;
1617 struct dpaa_sec_op_ctx *ctx;
1618 struct qm_sg_entry *sg;
1619 phys_addr_t src_start_addr, dst_start_addr;
1621 ctx = dpaa_sec_alloc_ctx(ses, 2);
1627 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1630 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1632 dst_start_addr = src_start_addr;
1636 qm_sg_entry_set64(sg, src_start_addr);
1637 sg->length = sym->m_src->pkt_len;
1641 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1644 qm_sg_entry_set64(sg, dst_start_addr);
1645 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
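/*
 * The output side is given the whole remaining buffer: protocol
 * offload (e.g. IPsec encap) can grow the packet beyond pkt_len.
 */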
1651 static inline struct dpaa_sec_job *
1652 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1654 struct rte_crypto_sym_op *sym = op->sym;
1655 struct dpaa_sec_job *cf;
1656 struct dpaa_sec_op_ctx *ctx;
1657 struct qm_sg_entry *sg, *out_sg, *in_sg;
1658 struct rte_mbuf *mbuf;
1660 uint32_t in_len = 0, out_len = 0;
1667 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1668 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1669 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1674 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1680 out_sg = &cf->sg[0];
1681 out_sg->extension = 1;
1682 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1686 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1689 /* Successive segs */
1690 while (mbuf->next) {
1691 sg->length = mbuf->data_len;
1692 out_len += sg->length;
1696 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1699 sg->length = mbuf->buf_len - mbuf->data_off;
1700 out_len += sg->length;
1704 out_sg->length = out_len;
1705 cpu_to_hw_sg(out_sg);
1710 in_sg->extension = 1;
1712 in_len = mbuf->data_len;
1715 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1718 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1719 sg->length = mbuf->data_len;
1722 /* Successive segs */
1727 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1728 sg->length = mbuf->data_len;
1730 in_len += sg->length;
1736 in_sg->length = in_len;
1737 cpu_to_hw_sg(in_sg);
1739 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1746 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1749 /* Function to transmit the frames to the given device and queue pair */
1751 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1752 uint16_t num_tx = 0;
1753 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1754 uint32_t frames_to_send;
1755 struct rte_crypto_op *op;
1756 struct dpaa_sec_job *cf;
1757 dpaa_sec_session *ses;
1758 uint16_t auth_hdr_len, auth_tail_len;
1759 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1760 struct qman_fq *inq[DPAA_SEC_BURST];
1762 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1763 if (rte_dpaa_portal_init((void *)0)) {
1764 DPAA_SEC_ERR("Failure in affining portal");
1770 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1771 DPAA_SEC_BURST : nb_ops;
1772 for (loop = 0; loop < frames_to_send; loop++) {
1774 if (*dpaa_seqn(op->sym->m_src) != 0) {
1775 index = *dpaa_seqn(op->sym->m_src) - 1;
1776 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1777 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1778 flags[loop] = ((index & 0x0f) << 8);
1779 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1780 DPAA_PER_LCORE_DQRR_SIZE--;
1781 DPAA_PER_LCORE_DQRR_HELD &=
1786 switch (op->sess_type) {
1787 case RTE_CRYPTO_OP_WITH_SESSION:
1788 ses = (dpaa_sec_session *)
1789 get_sym_session_private_data(
1791 cryptodev_driver_id);
1793 #ifdef RTE_LIB_SECURITY
1794 case RTE_CRYPTO_OP_SECURITY_SESSION:
1795 ses = (dpaa_sec_session *)
1796 get_sec_session_private_data(
1797 op->sym->sec_session);
1802 "sessionless crypto op not supported");
1803 frames_to_send = loop;
1809 DPAA_SEC_DP_ERR("session not available");
1810 frames_to_send = loop;
1815 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1816 if (dpaa_sec_attach_sess_q(qp, ses)) {
1817 frames_to_send = loop;
1821 } else if (unlikely(ses->qp[rte_lcore_id() %
1822 MAX_DPAA_CORES] != qp)) {
1823 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1825 ses->qp[rte_lcore_id() %
1826 MAX_DPAA_CORES], qp);
1827 frames_to_send = loop;
1832 auth_hdr_len = op->sym->auth.data.length -
1833 op->sym->cipher.data.length;
1836 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1837 ((op->sym->m_dst == NULL) ||
1838 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1839 switch (ses->ctxt) {
1840 #ifdef RTE_LIB_SECURITY
1842 case DPAA_SEC_IPSEC:
1843 cf = build_proto(op, ses);
1847 cf = build_auth_only(op, ses);
1849 case DPAA_SEC_CIPHER:
1850 cf = build_cipher_only(op, ses);
1853 cf = build_cipher_auth_gcm(op, ses);
1854 auth_hdr_len = ses->auth_only_len;
1856 case DPAA_SEC_CIPHER_HASH:
1858 op->sym->cipher.data.offset
1859 - op->sym->auth.data.offset;
1861 op->sym->auth.data.length
1862 - op->sym->cipher.data.length
1864 cf = build_cipher_auth(op, ses);
1867 DPAA_SEC_DP_ERR("not supported ops");
1868 frames_to_send = loop;
1873 switch (ses->ctxt) {
1874 #ifdef RTE_LIB_SECURITY
1876 case DPAA_SEC_IPSEC:
1877 cf = build_proto_sg(op, ses);
1881 cf = build_auth_only_sg(op, ses);
1883 case DPAA_SEC_CIPHER:
1884 cf = build_cipher_only_sg(op, ses);
1887 cf = build_cipher_auth_gcm_sg(op, ses);
1888 auth_hdr_len = ses->auth_only_len;
1890 case DPAA_SEC_CIPHER_HASH:
1892 op->sym->cipher.data.offset
1893 - op->sym->auth.data.offset;
1895 op->sym->auth.data.length
1896 - op->sym->cipher.data.length
1898 cf = build_cipher_auth_sg(op, ses);
1901 DPAA_SEC_DP_ERR("not supported ops");
1902 frames_to_send = loop;
1907 if (unlikely(!cf)) {
1908 frames_to_send = loop;
1914 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1915 fd->opaque_addr = 0;
1917 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1918 fd->_format1 = qm_fd_compound;
1919 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1921 /* Auth_only_len is set as 0 in the descriptor and it is
1922 * overwritten here in fd.cmd, which updates it per packet. */
1925 if (auth_hdr_len || auth_tail_len) {
1926 fd->cmd = 0x80000000;
1928 ((auth_tail_len << 16) | auth_hdr_len);
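/*
 * The auth tail length is packed in the half-word above the auth
 * header length; the top bit (0x80000000) appears to mark the
 * override word as valid.
 */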
1931 #ifdef RTE_LIB_SECURITY
1932 /* In case of PDCP, per packet HFN is stored in
1933 * mbuf priv after sym_op.
1935 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1936 fd->cmd = 0x80000000 |
1937 *((uint32_t *)((uint8_t *)op +
1938 ses->pdcp.hfn_ovd_offset));
1939 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1940 *((uint32_t *)((uint8_t *)op +
1941 ses->pdcp.hfn_ovd_offset)),
1948 while (loop < frames_to_send) {
1949 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1950 &flags[loop], frames_to_send - loop);
1952 nb_ops -= frames_to_send;
1953 num_tx += frames_to_send;
1956 dpaa_qp->tx_pkts += num_tx;
1957 dpaa_qp->tx_errs += nb_ops - num_tx;
1963 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1967 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1969 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1970 if (rte_dpaa_portal_init((void *)0)) {
1971 DPAA_SEC_ERR("Failure in affining portal");
1976 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1978 dpaa_qp->rx_pkts += num_rx;
1979 dpaa_qp->rx_errs += nb_ops - num_rx;
1981 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
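/*
 * Illustrative only (not compiled): how an application would drive
 * these burst hooks through the generic cryptodev API. dev_id, qp_id,
 * ops and deq_ops are placeholders supplied by the caller.
 */
#if 0
	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
						    ops, nb_ops);
	uint16_t done = 0;

	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
						    &deq_ops[done],
						    sent - done);
#endif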
1986 /** Release queue pair */
1988 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1991 struct dpaa_sec_dev_private *internals;
1992 struct dpaa_sec_qp *qp = NULL;
1994 PMD_INIT_FUNC_TRACE();
1996 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1998 internals = dev->data->dev_private;
1999 if (qp_id >= internals->max_nb_queue_pairs) {
2000 DPAA_SEC_ERR("Max supported qpid %d",
2001 internals->max_nb_queue_pairs);
2005 qp = &internals->qps[qp_id];
2006 rte_mempool_free(qp->ctx_pool);
2007 qp->internals = NULL;
2008 dev->data->queue_pairs[qp_id] = NULL;
2013 /** Setup a queue pair */
2015 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2016 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2017 __rte_unused int socket_id)
2019 struct dpaa_sec_dev_private *internals;
2020 struct dpaa_sec_qp *qp = NULL;
2023 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2025 internals = dev->data->dev_private;
2026 if (qp_id >= internals->max_nb_queue_pairs) {
2027 DPAA_SEC_ERR("Max supported qpid %d",
2028 internals->max_nb_queue_pairs);
2032 qp = &internals->qps[qp_id];
2033 qp->internals = internals;
2034 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2035 dev->data->dev_id, qp_id);
2036 if (!qp->ctx_pool) {
2037 qp->ctx_pool = rte_mempool_create((const char *)str,
2040 CTX_POOL_CACHE_SIZE, 0,
2041 NULL, NULL, NULL, NULL,
2043 if (!qp->ctx_pool) {
2044 DPAA_SEC_ERR("%s create failed\n", str);
2048 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2049 dev->data->dev_id, qp_id);
2050 dev->data->queue_pairs[qp_id] = qp;
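/*
 * The per-qp ctx_pool backs dpaa_sec_alloc_ctx() on the data path; it
 * is created once and reused if the queue pair is set up again.
 *
 * Illustrative only (not compiled): a typical application-side call
 * that reaches this handler; dev_id and the mempools are placeholders.
 */
#if 0
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};

	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					     rte_socket_id());
#endif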
2055 /** Returns the size of session structure */
2057 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2059 PMD_INIT_FUNC_TRACE();
2061 return sizeof(dpaa_sec_session);
2065 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2066 struct rte_crypto_sym_xform *xform,
2067 dpaa_sec_session *session)
2069 session->ctxt = DPAA_SEC_CIPHER;
2070 session->cipher_alg = xform->cipher.algo;
2071 session->iv.length = xform->cipher.iv.length;
2072 session->iv.offset = xform->cipher.iv.offset;
2073 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2074 RTE_CACHE_LINE_SIZE);
2075 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2076 DPAA_SEC_ERR("No Memory for cipher key");
2079 session->cipher_key.length = xform->cipher.key.length;
2081 memcpy(session->cipher_key.data, xform->cipher.key.data,
2082 xform->cipher.key.length);
2083 switch (xform->cipher.algo) {
2084 case RTE_CRYPTO_CIPHER_AES_CBC:
2085 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2086 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2088 case RTE_CRYPTO_CIPHER_DES_CBC:
2089 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2090 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2092 case RTE_CRYPTO_CIPHER_3DES_CBC:
2093 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2094 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2096 case RTE_CRYPTO_CIPHER_AES_CTR:
2097 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2098 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2100 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2101 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2103 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2104 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2107 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2108 xform->cipher.algo);
2111 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2118 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2119 struct rte_crypto_sym_xform *xform,
2120 dpaa_sec_session *session)
2122 session->ctxt = DPAA_SEC_AUTH;
2123 session->auth_alg = xform->auth.algo;
2124 session->auth_key.length = xform->auth.key.length;
2125 if (xform->auth.key.length) {
2126 session->auth_key.data =
2127 rte_zmalloc(NULL, xform->auth.key.length,
2128 RTE_CACHE_LINE_SIZE);
2129 if (session->auth_key.data == NULL) {
2130 DPAA_SEC_ERR("No Memory for auth key");
2133 memcpy(session->auth_key.data, xform->auth.key.data,
2134 xform->auth.key.length);
2137 session->digest_length = xform->auth.digest_length;
2138 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2139 session->iv.offset = xform->auth.iv.offset;
2140 session->iv.length = xform->auth.iv.length;
2143 switch (xform->auth.algo) {
2144 case RTE_CRYPTO_AUTH_SHA1:
2145 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2146 session->auth_key.algmode = OP_ALG_AAI_HASH;
2148 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2149 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2150 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2152 case RTE_CRYPTO_AUTH_MD5:
2153 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2154 session->auth_key.algmode = OP_ALG_AAI_HASH;
2156 case RTE_CRYPTO_AUTH_MD5_HMAC:
2157 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2158 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2160 case RTE_CRYPTO_AUTH_SHA224:
2161 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2162 session->auth_key.algmode = OP_ALG_AAI_HASH;
2164 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2165 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2166 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2168 case RTE_CRYPTO_AUTH_SHA256:
2169 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2170 session->auth_key.algmode = OP_ALG_AAI_HASH;
2172 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2173 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2174 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2176 case RTE_CRYPTO_AUTH_SHA384:
2177 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2178 session->auth_key.algmode = OP_ALG_AAI_HASH;
2180 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2181 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2182 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2184 case RTE_CRYPTO_AUTH_SHA512:
2185 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2186 session->auth_key.algmode = OP_ALG_AAI_HASH;
2188 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2189 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2190 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2192 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2193 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2194 session->auth_key.algmode = OP_ALG_AAI_F9;
2196 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2197 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2198 session->auth_key.algmode = OP_ALG_AAI_F9;
2200 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2201 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2202 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2204 case RTE_CRYPTO_AUTH_AES_CMAC:
2205 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2206 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2209 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2214 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2221 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2222 struct rte_crypto_sym_xform *xform,
2223 dpaa_sec_session *session)
2226 struct rte_crypto_cipher_xform *cipher_xform;
2227 struct rte_crypto_auth_xform *auth_xform;
2229 session->ctxt = DPAA_SEC_CIPHER_HASH;
2230 if (session->auth_cipher_text) {
2231 cipher_xform = &xform->cipher;
2232 auth_xform = &xform->next->auth;
2234 cipher_xform = &xform->next->cipher;
2235 auth_xform = &xform->auth;
2238 /* Set IV parameters */
2239 session->iv.offset = cipher_xform->iv.offset;
2240 session->iv.length = cipher_xform->iv.length;
2242 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2243 RTE_CACHE_LINE_SIZE);
2244 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2245 DPAA_SEC_ERR("No Memory for cipher key");
2248 session->cipher_key.length = cipher_xform->key.length;
2249 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2250 RTE_CACHE_LINE_SIZE);
2251 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2252 DPAA_SEC_ERR("No Memory for auth key");
2255 session->auth_key.length = auth_xform->key.length;
2256 memcpy(session->cipher_key.data, cipher_xform->key.data,
2257 cipher_xform->key.length);
2258 memcpy(session->auth_key.data, auth_xform->key.data,
2259 auth_xform->key.length);
2261 session->digest_length = auth_xform->digest_length;
2262 session->auth_alg = auth_xform->algo;
2264 switch (auth_xform->algo) {
2265 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2266 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2267 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2269 case RTE_CRYPTO_AUTH_MD5_HMAC:
2270 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2271 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2273 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2274 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2275 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2277 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2278 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2279 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2281 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2282 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2283 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2285 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2286 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2287 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2289 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2290 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2291 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2293 case RTE_CRYPTO_AUTH_AES_CMAC:
2294 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2295 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2298 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2303 session->cipher_alg = cipher_xform->algo;
2305 switch (cipher_xform->algo) {
2306 case RTE_CRYPTO_CIPHER_AES_CBC:
2307 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2308 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2310 case RTE_CRYPTO_CIPHER_DES_CBC:
2311 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2312 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2314 case RTE_CRYPTO_CIPHER_3DES_CBC:
2315 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2316 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2318 case RTE_CRYPTO_CIPHER_AES_CTR:
2319 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2320 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2323 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2324 cipher_xform->algo);
2327 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2333 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2334 struct rte_crypto_sym_xform *xform,
2335 dpaa_sec_session *session)
2337 session->aead_alg = xform->aead.algo;
2338 session->ctxt = DPAA_SEC_AEAD;
2339 session->iv.length = xform->aead.iv.length;
2340 session->iv.offset = xform->aead.iv.offset;
2341 session->auth_only_len = xform->aead.aad_length;
2342 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2343 RTE_CACHE_LINE_SIZE);
2344 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2345 DPAA_SEC_ERR("No Memory for aead key\n");
2348 session->aead_key.length = xform->aead.key.length;
2349 session->digest_length = xform->aead.digest_length;
2351 memcpy(session->aead_key.data, xform->aead.key.data,
2352 xform->aead.key.length);
2354 switch (session->aead_alg) {
2355 case RTE_CRYPTO_AEAD_AES_GCM:
2356 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2357 session->aead_key.algmode = OP_ALG_AAI_GCM;
2360 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2364 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2370 static struct qman_fq *
2371 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2375 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2376 if (qi->inq_attach[i] == 0) {
2377 qi->inq_attach[i] = 1;
2381 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2387 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2391 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2392 if (&qi->inq[i] == fq) {
2393 if (qman_retire_fq(fq, NULL) != 0)
2394 DPAA_SEC_DEBUG("Queue is not retired\n");
2396 qi->inq_attach[i] = 0;
2404 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2408 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2409 ret = dpaa_sec_prep_cdb(sess);
2411 DPAA_SEC_ERR("Unable to prepare sec cdb");
2414 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2415 ret = rte_dpaa_portal_init((void *)0);
2417 DPAA_SEC_ERR("Failure in affining portal");
2421 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2422 rte_dpaa_mem_vtop(&sess->cdb),
2423 qman_fq_fqid(&qp->outq));
2425 DPAA_SEC_ERR("Unable to init sec queue");
2431 free_session_data(dpaa_sec_session *s)
2434 rte_free(s->aead_key.data);
2436 rte_free(s->auth_key.data);
2437 rte_free(s->cipher_key.data);
2439 memset(s, 0, sizeof(dpaa_sec_session));
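/*
 * Zeroing the whole session also scrubs any key material that was
 * copied in at session setup time.
 */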
2443 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2444 struct rte_crypto_sym_xform *xform, void *sess)
2446 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2447 dpaa_sec_session *session = sess;
2451 PMD_INIT_FUNC_TRACE();
2453 if (unlikely(sess == NULL)) {
2454 DPAA_SEC_ERR("invalid session struct");
2457 memset(session, 0, sizeof(dpaa_sec_session));
2459 /* Default IV length = 0 */
2460 session->iv.length = 0;
2463 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2464 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2465 ret = dpaa_sec_cipher_init(dev, xform, session);
2467 /* Authentication Only */
2468 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2469 xform->next == NULL) {
2470 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2471 session->ctxt = DPAA_SEC_AUTH;
2472 ret = dpaa_sec_auth_init(dev, xform, session);
2474 /* Cipher then Authenticate */
2475 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2476 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2477 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2478 session->auth_cipher_text = 1;
2479 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2480 ret = dpaa_sec_auth_init(dev, xform, session);
2481 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2482 ret = dpaa_sec_cipher_init(dev, xform, session);
2484 ret = dpaa_sec_chain_init(dev, xform, session);
2486 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2489 /* Authenticate then Cipher */
2490 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2491 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2492 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2493 session->auth_cipher_text = 0;
2494 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2495 ret = dpaa_sec_cipher_init(dev, xform, session);
2496 else if (xform->next->cipher.algo
2497 == RTE_CRYPTO_CIPHER_NULL)
2498 ret = dpaa_sec_auth_init(dev, xform, session);
2500 ret = dpaa_sec_chain_init(dev, xform, session);
2502 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2506 /* AEAD operation for AES-GCM kind of Algorithms */
2507 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2508 xform->next == NULL) {
2509 ret = dpaa_sec_aead_init(dev, xform, session);
2512 DPAA_SEC_ERR("Invalid crypto type");
2516 DPAA_SEC_ERR("unable to init session");
2520 rte_spinlock_lock(&internals->lock);
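/*
 * Reserve one Rx FQ per lcore up front so each core can later reach
 * the session without taking a lock on the data path.
 */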
2521 for (i = 0; i < MAX_DPAA_CORES; i++) {
2522 session->inq[i] = dpaa_sec_attach_rxq(internals);
2523 if (session->inq[i] == NULL) {
2524 DPAA_SEC_ERR("unable to attach sec queue");
2525 rte_spinlock_unlock(&internals->lock);
2530 rte_spinlock_unlock(&internals->lock);
2535 free_session_data(session);
2540 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2541 struct rte_crypto_sym_xform *xform,
2542 struct rte_cryptodev_sym_session *sess,
2543 struct rte_mempool *mempool)
2545 void *sess_private_data;
2548 PMD_INIT_FUNC_TRACE();
2550 if (rte_mempool_get(mempool, &sess_private_data)) {
2551 DPAA_SEC_ERR("Couldn't get object from session mempool");
2555 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2557 DPAA_SEC_ERR("failed to configure session parameters");
2559 /* Return session to mempool */
2560 rte_mempool_put(mempool, sess_private_data);
2564 set_sym_session_private_data(sess, dev->driver_id,
2572 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2574 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2575 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2578 for (i = 0; i < MAX_DPAA_CORES; i++) {
2580 dpaa_sec_detach_rxq(qi, s->inq[i]);
2584 free_session_data(s);
2585 rte_mempool_put(sess_mp, (void *)s);
2588 /** Clear the memory of session so it doesn't leave key material behind */
2590 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2591 struct rte_cryptodev_sym_session *sess)
2593 PMD_INIT_FUNC_TRACE();
2594 uint8_t index = dev->driver_id;
2595 void *sess_priv = get_sym_session_private_data(sess, index);
2596 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2599 free_session_memory(dev, s);
2600 set_sym_session_private_data(sess, index, NULL);
2604 #ifdef RTE_LIB_SECURITY
2606 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2607 struct rte_security_ipsec_xform *ipsec_xform,
2608 dpaa_sec_session *session)
2610 PMD_INIT_FUNC_TRACE();
2612 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2613 RTE_CACHE_LINE_SIZE);
2614 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2615 DPAA_SEC_ERR("No Memory for aead key");
2618 memcpy(session->aead_key.data, aead_xform->key.data,
2619 aead_xform->key.length);
2621 session->digest_length = aead_xform->digest_length;
2622 session->aead_key.length = aead_xform->key.length;
2624 switch (aead_xform->algo) {
2625 case RTE_CRYPTO_AEAD_AES_GCM:
2626 switch (session->digest_length) {
2628 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2631 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2634 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2637 DPAA_SEC_ERR("Crypto: Unsupported GCM digest length %d",
2638 session->digest_length);
2641 if (session->dir == DIR_ENC) {
2642 memcpy(session->encap_pdb.gcm.salt,
2643 (uint8_t *)&(ipsec_xform->salt), 4);
2645 memcpy(session->decap_pdb.gcm.salt,
2646 (uint8_t *)&(ipsec_xform->salt), 4);
2648 session->aead_key.algmode = OP_ALG_AAI_GCM;
2649 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2652 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
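/* Copy the cipher and auth keys of an IPsec session and map the crypto
 * algorithms onto the SEC protocol descriptor constants
 * (OP_PCL_IPSEC_*). A missing xform degrades to the corresponding NULL
 * algorithm.
 */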
2660 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2661 struct rte_crypto_auth_xform *auth_xform,
2662 struct rte_security_ipsec_xform *ipsec_xform,
2663 dpaa_sec_session *session)
2666 session->cipher_key.data = rte_zmalloc(NULL,
2667 cipher_xform->key.length,
2668 RTE_CACHE_LINE_SIZE);
2669 if (session->cipher_key.data == NULL &&
2670 cipher_xform->key.length > 0) {
2671 DPAA_SEC_ERR("No Memory for cipher key");
2675 session->cipher_key.length = cipher_xform->key.length;
2676 memcpy(session->cipher_key.data, cipher_xform->key.data,
2677 cipher_xform->key.length);
2678 session->cipher_alg = cipher_xform->algo;
2680 session->cipher_key.data = NULL;
2681 session->cipher_key.length = 0;
2682 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2686 session->auth_key.data = rte_zmalloc(NULL,
2687 auth_xform->key.length,
2688 RTE_CACHE_LINE_SIZE);
2689 if (session->auth_key.data == NULL &&
2690 auth_xform->key.length > 0) {
2691 DPAA_SEC_ERR("No Memory for auth key");
2694 session->auth_key.length = auth_xform->key.length;
2695 memcpy(session->auth_key.data, auth_xform->key.data,
2696 auth_xform->key.length);
2697 session->auth_alg = auth_xform->algo;
2698 session->digest_length = auth_xform->digest_length;
2700 session->auth_key.data = NULL;
2701 session->auth_key.length = 0;
2702 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2705 switch (session->auth_alg) {
2706 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2707 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2708 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2710 case RTE_CRYPTO_AUTH_MD5_HMAC:
2711 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2712 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2714 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2715 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2716 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2717 if (session->digest_length != 16)
2719 "+++Using sha256-hmac truncated len is non-standard,"
2720 "it will not work with lookaside proto");
2722 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2723 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2724 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2726 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2727 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2728 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2730 case RTE_CRYPTO_AUTH_AES_CMAC:
2731 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2732 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2734 case RTE_CRYPTO_AUTH_NULL:
2735 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2737 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2738 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2739 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2741 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2742 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2743 case RTE_CRYPTO_AUTH_SHA1:
2744 case RTE_CRYPTO_AUTH_SHA256:
2745 case RTE_CRYPTO_AUTH_SHA512:
2746 case RTE_CRYPTO_AUTH_SHA224:
2747 case RTE_CRYPTO_AUTH_SHA384:
2748 case RTE_CRYPTO_AUTH_MD5:
2749 case RTE_CRYPTO_AUTH_AES_GMAC:
2750 case RTE_CRYPTO_AUTH_KASUMI_F9:
2751 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2752 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2753 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2757 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2762 switch (session->cipher_alg) {
2763 case RTE_CRYPTO_CIPHER_AES_CBC:
2764 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2765 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2767 case RTE_CRYPTO_CIPHER_DES_CBC:
2768 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2769 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2771 case RTE_CRYPTO_CIPHER_3DES_CBC:
2772 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2773 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2775 case RTE_CRYPTO_CIPHER_AES_CTR:
2776 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2777 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2778 if (session->dir == DIR_ENC) {
2779 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2780 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2782 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2783 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2786 case RTE_CRYPTO_CIPHER_NULL:
2787 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2789 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2790 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2791 case RTE_CRYPTO_CIPHER_3DES_ECB:
2792 case RTE_CRYPTO_CIPHER_AES_ECB:
2793 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2794 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2795 session->cipher_alg);
2798 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2799 session->cipher_alg);
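/* Build an IPsec protocol-offload session from a security session
 * config: resolve the xform chain, pre-build the outer IPv4/IPv6 header
 * for egress tunnels, program the encap/decap PDB (SPI, ESN,
 * anti-replay window) and attach one SEC Rx queue per core.
 */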
2807 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2808 struct rte_security_session_conf *conf,
2811 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2812 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2813 struct rte_crypto_auth_xform *auth_xform = NULL;
2814 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2815 struct rte_crypto_aead_xform *aead_xform = NULL;
2816 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2820 PMD_INIT_FUNC_TRACE();
2822 memset(session, 0, sizeof(dpaa_sec_session));
2823 session->proto_alg = conf->protocol;
2824 session->ctxt = DPAA_SEC_IPSEC;
2826 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2827 session->dir = DIR_ENC;
2829 session->dir = DIR_DEC;
2831 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2832 cipher_xform = &conf->crypto_xform->cipher;
2833 if (conf->crypto_xform->next)
2834 auth_xform = &conf->crypto_xform->next->auth;
2835 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2836 ipsec_xform, session);
2837 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2838 auth_xform = &conf->crypto_xform->auth;
2839 if (conf->crypto_xform->next)
2840 cipher_xform = &conf->crypto_xform->next->cipher;
2841 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2842 ipsec_xform, session);
2843 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2844 aead_xform = &conf->crypto_xform->aead;
2845 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2846 ipsec_xform, session);
2848 DPAA_SEC_ERR("XFORM not specified");
2853 DPAA_SEC_ERR("Failed to process xform");
2857 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2858 if (ipsec_xform->tunnel.type ==
2859 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2860 session->ip4_hdr.ip_v = IPVERSION;
2861 session->ip4_hdr.ip_hl = 5;
2862 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2863 sizeof(session->ip4_hdr));
2864 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2865 session->ip4_hdr.ip_id = 0;
2866 session->ip4_hdr.ip_off = 0;
2867 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2868 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2869 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2870 IPPROTO_ESP : IPPROTO_AH;
2871 session->ip4_hdr.ip_sum = 0;
2872 session->ip4_hdr.ip_src =
2873 ipsec_xform->tunnel.ipv4.src_ip;
2874 session->ip4_hdr.ip_dst =
2875 ipsec_xform->tunnel.ipv4.dst_ip;
2876 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2877 (void *)&session->ip4_hdr,
2879 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2880 } else if (ipsec_xform->tunnel.type ==
2881 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2882 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2883 DPAA_IPv6_DEFAULT_VTC_FLOW |
2884 ((ipsec_xform->tunnel.ipv6.dscp <<
2885 RTE_IPV6_HDR_TC_SHIFT) &
2886 RTE_IPV6_HDR_TC_MASK) |
2887 ((ipsec_xform->tunnel.ipv6.flabel <<
2888 RTE_IPV6_HDR_FL_SHIFT) &
2889 RTE_IPV6_HDR_FL_MASK));
2890 /* Payload length will be updated by HW */
2891 session->ip6_hdr.payload_len = 0;
2892 session->ip6_hdr.hop_limits =
2893 ipsec_xform->tunnel.ipv6.hlimit;
2894 session->ip6_hdr.proto = (ipsec_xform->proto ==
2895 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2896 IPPROTO_ESP : IPPROTO_AH;
2897 memcpy(&session->ip6_hdr.src_addr,
2898 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2899 memcpy(&session->ip6_hdr.dst_addr,
2900 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2901 session->encap_pdb.ip_hdr_len =
2902 sizeof(struct rte_ipv6_hdr);
2904 session->encap_pdb.options =
2905 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2906 PDBOPTS_ESP_OIHI_PDB_INL |
2908 PDBHMO_ESP_ENCAP_DTTL |
2910 if (ipsec_xform->options.esn)
2911 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2912 session->encap_pdb.spi = ipsec_xform->spi;
2914 } else if (ipsec_xform->direction ==
2915 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2916 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2917 session->decap_pdb.options = sizeof(struct ip) << 16;
2919 session->decap_pdb.options =
2920 sizeof(struct rte_ipv6_hdr) << 16;
2921 if (ipsec_xform->options.esn)
2922 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2923 if (ipsec_xform->replay_win_sz) {
2925 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2934 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2937 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2940 session->decap_pdb.options |=
2946 rte_spinlock_lock(&internals->lock);
2947 for (i = 0; i < MAX_DPAA_CORES; i++) {
2948 session->inq[i] = dpaa_sec_attach_rxq(internals);
2949 if (session->inq[i] == NULL) {
2950 DPAA_SEC_ERR("unable to attach sec queue");
2951 rte_spinlock_unlock(&internals->lock);
2955 rte_spinlock_unlock(&internals->lock);
2959 free_session_data(session);
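/* Build a PDCP protocol-offload session: map the SNOW3G/ZUC/AES/NULL
 * cipher and integrity algorithms onto the PDCP descriptor types, copy
 * the keys, record bearer/direction/SN-size/HFN parameters and attach
 * one SEC Rx queue per core.
 */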
2964 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2965 struct rte_security_session_conf *conf,
2968 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2969 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2970 struct rte_crypto_auth_xform *auth_xform = NULL;
2971 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2972 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2973 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2977 PMD_INIT_FUNC_TRACE();
2979 memset(session, 0, sizeof(dpaa_sec_session));
2981 /* find xform types */
2982 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2983 cipher_xform = &xform->cipher;
2984 if (xform->next != NULL)
2985 auth_xform = &xform->next->auth;
2986 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2987 auth_xform = &xform->auth;
2988 if (xform->next != NULL)
2989 cipher_xform = &xform->next->cipher;
2991 DPAA_SEC_ERR("Invalid crypto type");
2995 session->proto_alg = conf->protocol;
2996 session->ctxt = DPAA_SEC_PDCP;
2999 switch (cipher_xform->algo) {
3000 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3001 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3003 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3004 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3006 case RTE_CRYPTO_CIPHER_AES_CTR:
3007 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3009 case RTE_CRYPTO_CIPHER_NULL:
3010 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3013 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3014 session->cipher_alg);
3018 session->cipher_key.data = rte_zmalloc(NULL,
3019 cipher_xform->key.length,
3020 RTE_CACHE_LINE_SIZE);
3021 if (session->cipher_key.data == NULL &&
3022 cipher_xform->key.length > 0) {
3023 DPAA_SEC_ERR("No Memory for cipher key");
3026 session->cipher_key.length = cipher_xform->key.length;
3027 memcpy(session->cipher_key.data, cipher_xform->key.data,
3028 cipher_xform->key.length);
3029 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3031 session->cipher_alg = cipher_xform->algo;
3033 session->cipher_key.data = NULL;
3034 session->cipher_key.length = 0;
3035 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3036 session->dir = DIR_ENC;
3039 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3040 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3041 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3043 "PDCP Seq Num size should be 5/12 bits for cmode");
3050 switch (auth_xform->algo) {
3051 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3052 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3054 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3055 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3057 case RTE_CRYPTO_AUTH_AES_CMAC:
3058 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3060 case RTE_CRYPTO_AUTH_NULL:
3061 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3064 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3066 rte_free(session->cipher_key.data);
3069 session->auth_key.data = rte_zmalloc(NULL,
3070 auth_xform->key.length,
3071 RTE_CACHE_LINE_SIZE);
3072 if (!session->auth_key.data &&
3073 auth_xform->key.length > 0) {
3074 DPAA_SEC_ERR("No Memory for auth key");
3075 rte_free(session->cipher_key.data);
3078 session->auth_key.length = auth_xform->key.length;
3079 memcpy(session->auth_key.data, auth_xform->key.data,
3080 auth_xform->key.length);
3081 session->auth_alg = auth_xform->algo;
3083 session->auth_key.data = NULL;
3084 session->auth_key.length = 0;
3085 session->auth_alg = 0;
3087 session->pdcp.domain = pdcp_xform->domain;
3088 session->pdcp.bearer = pdcp_xform->bearer;
3089 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3090 session->pdcp.sn_size = pdcp_xform->sn_size;
3091 session->pdcp.hfn = pdcp_xform->hfn;
3092 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3093 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3094 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
3096 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3098 rte_spinlock_lock(&dev_priv->lock);
3099 for (i = 0; i < MAX_DPAA_CORES; i++) {
3100 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3101 if (session->inq[i] == NULL) {
3102 DPAA_SEC_ERR("unable to attach sec queue");
3103 rte_spinlock_unlock(&dev_priv->lock);
3108 rte_spinlock_unlock(&dev_priv->lock);
3111 rte_free(session->auth_key.data);
3112 rte_free(session->cipher_key.data);
3113 memset(session, 0, sizeof(dpaa_sec_session));
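/* rte_security session create entry point: take a private-data object
 * from the mempool and dispatch to the IPsec or PDCP session builder
 * according to conf->protocol (MACsec is not supported).
 */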
3118 dpaa_sec_security_session_create(void *dev,
3119 struct rte_security_session_conf *conf,
3120 struct rte_security_session *sess,
3121 struct rte_mempool *mempool)
3123 void *sess_private_data;
3124 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3127 if (rte_mempool_get(mempool, &sess_private_data)) {
3128 DPAA_SEC_ERR("Couldn't get object from session mempool");
3132 switch (conf->protocol) {
3133 case RTE_SECURITY_PROTOCOL_IPSEC:
3134 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3137 case RTE_SECURITY_PROTOCOL_PDCP:
3138 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3141 case RTE_SECURITY_PROTOCOL_MACSEC:
3147 DPAA_SEC_ERR("failed to configure session parameters");
3148 /* Return session to mempool */
3149 rte_mempool_put(mempool, sess_private_data);
3153 set_sec_session_private_data(sess, sess_private_data);
3158 /** Clear the memory of the session so it doesn't leave key material behind */
3160 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3161 struct rte_security_session *sess)
3163 PMD_INIT_FUNC_TRACE();
3164 void *sess_priv = get_sec_session_private_data(sess);
3165 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3168 free_session_memory((struct rte_cryptodev *)dev, s);
3169 set_sec_session_private_data(sess, NULL);
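/* Generic device ops. The SEC block needs no device-level
 * (re)configuration, so configure/start/stop are effectively no-ops
 * that only emit an init trace.
 */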
3175 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3176 struct rte_cryptodev_config *config __rte_unused)
3178 PMD_INIT_FUNC_TRACE();
3184 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3186 PMD_INIT_FUNC_TRACE();
3191 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3193 PMD_INIT_FUNC_TRACE();
3197 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3199 PMD_INIT_FUNC_TRACE();
3208 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3209 struct rte_cryptodev_info *info)
3211 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3213 PMD_INIT_FUNC_TRACE();
3215 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3216 info->feature_flags = dev->feature_flags;
3217 info->capabilities = dpaa_sec_capabilities;
3218 info->sym.max_nb_sessions = internals->max_nb_sessions;
3219 info->driver_id = cryptodev_driver_id;
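/* Eventdev dequeue callback for parallel queues: recover the op context
 * embedded in the frame descriptor, translate the SEC status into the
 * crypto op status, fill in the crypto event from the out-FQ template
 * and consume the DQRR entry immediately.
 */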
3223 static enum qman_cb_dqrr_result
3224 dpaa_sec_process_parallel_event(void *event,
3225 struct qman_portal *qm __always_unused,
3226 struct qman_fq *outq,
3227 const struct qm_dqrr_entry *dqrr,
3230 const struct qm_fd *fd;
3231 struct dpaa_sec_job *job;
3232 struct dpaa_sec_op_ctx *ctx;
3233 struct rte_event *ev = (struct rte_event *)event;
3237 /* sg is embedded in an op ctx,
3238 * sg[0] is for output
3241 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3243 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3244 ctx->fd_status = fd->status;
3245 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3246 struct qm_sg_entry *sg_out;
3249 sg_out = &job->sg[0];
3250 hw_sg_to_cpu(sg_out);
3251 len = sg_out->length;
3252 ctx->op->sym->m_src->pkt_len = len;
3253 ctx->op->sym->m_src->data_len = len;
3255 if (!ctx->fd_status) {
3256 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3258 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3259 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3261 ev->event_ptr = (void *)ctx->op;
3263 ev->flow_id = outq->ev.flow_id;
3264 ev->sub_event_type = outq->ev.sub_event_type;
3265 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3266 ev->op = RTE_EVENT_OP_NEW;
3267 ev->sched_type = outq->ev.sched_type;
3268 ev->queue_id = outq->ev.queue_id;
3269 ev->priority = outq->ev.priority;
3270 *bufs = (void *)ctx->op;
3272 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3274 return qman_cb_dqrr_consume;
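/* Atomic-queue variant of the callback above: additionally records
 * which DQRR slot holds the frame (DPAA_PER_LCORE_DQRR_*) so the entry
 * stays held active until the application releases it, and therefore
 * returns qman_cb_dqrr_defer instead of consuming the entry.
 */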
3277 static enum qman_cb_dqrr_result
3278 dpaa_sec_process_atomic_event(void *event,
3279 struct qman_portal *qm __rte_unused,
3280 struct qman_fq *outq,
3281 const struct qm_dqrr_entry *dqrr,
3285 const struct qm_fd *fd;
3286 struct dpaa_sec_job *job;
3287 struct dpaa_sec_op_ctx *ctx;
3288 struct rte_event *ev = (struct rte_event *)event;
3292 /* sg is embedded in an op ctx,
3293 * sg[0] is for output
3296 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3298 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3299 ctx->fd_status = fd->status;
3300 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3301 struct qm_sg_entry *sg_out;
3304 sg_out = &job->sg[0];
3305 hw_sg_to_cpu(sg_out);
3306 len = sg_out->length;
3307 ctx->op->sym->m_src->pkt_len = len;
3308 ctx->op->sym->m_src->data_len = len;
3310 if (!ctx->fd_status) {
3311 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3313 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3314 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3316 ev->event_ptr = (void *)ctx->op;
3317 ev->flow_id = outq->ev.flow_id;
3318 ev->sub_event_type = outq->ev.sub_event_type;
3319 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3320 ev->op = RTE_EVENT_OP_NEW;
3321 ev->sched_type = outq->ev.sched_type;
3322 ev->queue_id = outq->ev.queue_id;
3323 ev->priority = outq->ev.priority;
3325 /* Save active dqrr entries */
3326 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3327 DPAA_PER_LCORE_DQRR_SIZE++;
3328 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3329 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3330 ev->impl_opaque = index + 1;
3331 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3332 *bufs = (void *)ctx->op;
3334 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3336 return qman_cb_dqrr_defer;
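/* Attach a queue pair's out-FQ to an eventdev channel: re-init the FQ
 * with the given channel as destination and pick the atomic or parallel
 * dequeue callback from the event's schedule type (ordered queues are
 * rejected). Typically reached through
 * rte_event_crypto_adapter_queue_pair_add() on the DPAA eventdev.
 */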
3340 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3343 const struct rte_event *event)
3345 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3346 struct qm_mcc_initfq opts = {0};
3350 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3351 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3352 opts.fqd.dest.channel = ch_id;
3354 switch (event->sched_type) {
3355 case RTE_SCHED_TYPE_ATOMIC:
3356 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3357 /* Clear the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3358 * configuration when HOLD_ACTIVE is set
3360 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3361 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3363 case RTE_SCHED_TYPE_ORDERED:
3364 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3367 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3368 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3372 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3373 if (unlikely(ret)) {
3374 DPAA_SEC_ERR("unable to init caam source fq!");
3378 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
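/* Detach the out-FQ from the eventdev: retire it, take it out of
 * service and re-init it with the default poll-mode DQRR/ERN callbacks
 * restored.
 */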
3384 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3387 struct qm_mcc_initfq opts = {0};
3389 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3391 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3392 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3393 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3394 qp->outq.cb.ern = ern_sec_fq_handler;
3395 qman_retire_fq(&qp->outq, NULL);
3396 qman_oos_fq(&qp->outq);
3397 ret = qman_init_fq(&qp->outq, 0, &opts);
3399 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3400 qp->outq.cb.dqrr = NULL;
3405 static struct rte_cryptodev_ops crypto_ops = {
3406 .dev_configure = dpaa_sec_dev_configure,
3407 .dev_start = dpaa_sec_dev_start,
3408 .dev_stop = dpaa_sec_dev_stop,
3409 .dev_close = dpaa_sec_dev_close,
3410 .dev_infos_get = dpaa_sec_dev_infos_get,
3411 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3412 .queue_pair_release = dpaa_sec_queue_pair_release,
3413 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3414 .sym_session_configure = dpaa_sec_sym_session_configure,
3415 .sym_session_clear = dpaa_sec_sym_session_clear
3418 #ifdef RTE_LIB_SECURITY
3419 static const struct rte_security_capability *
3420 dpaa_sec_capabilities_get(void *device __rte_unused)
3422 return dpaa_sec_security_cap;
3425 static const struct rte_security_ops dpaa_sec_security_ops = {
3426 .session_create = dpaa_sec_security_session_create,
3427 .session_update = NULL,
3428 .session_stats_get = NULL,
3429 .session_destroy = dpaa_sec_security_session_destroy,
3430 .set_pkt_metadata = NULL,
3431 .capabilities_get = dpaa_sec_capabilities_get
3435 dpaa_sec_uninit(struct rte_cryptodev *dev)
3437 struct dpaa_sec_dev_private *internals;
3442 internals = dev->data->dev_private;
3443 rte_free(dev->security_ctx);
3445 rte_free(internals);
3447 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3448 dev->data->name, rte_socket_id());
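/* One-time device initialization (primary process only): publish the
 * ops table and feature flags, create the rte_security context, init
 * the Tx FQ of every queue pair and pre-create the pool of dynamic Rx
 * FQs that sessions attach to later.
 */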
3454 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3456 struct dpaa_sec_dev_private *internals;
3457 #ifdef RTE_LIB_SECURITY
3458 struct rte_security_ctx *security_instance;
3460 struct dpaa_sec_qp *qp;
3464 PMD_INIT_FUNC_TRACE();
3466 cryptodev->driver_id = cryptodev_driver_id;
3467 cryptodev->dev_ops = &crypto_ops;
3469 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3470 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3471 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3472 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3473 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3474 RTE_CRYPTODEV_FF_SECURITY |
3475 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3476 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3477 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3478 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3479 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3481 internals = cryptodev->data->dev_private;
3482 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3483 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3486 * For secondary processes, we don't initialise any further as primary
3487 * has already done this work. Only check we don't need a different
3490 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3491 DPAA_SEC_WARN("Device already init by primary process");
3494 #ifdef RTE_LIB_SECURITY
3495 /* Initialize security_ctx only for primary process */
3496 security_instance = rte_malloc("rte_security_instances_ops",
3497 sizeof(struct rte_security_ctx), 0);
3498 if (security_instance == NULL)
3500 security_instance->device = (void *)cryptodev;
3501 security_instance->ops = &dpaa_sec_security_ops;
3502 security_instance->sess_cnt = 0;
3503 cryptodev->security_ctx = security_instance;
3505 rte_spinlock_init(&internals->lock);
3506 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3507 /* init qman fq for queue pair */
3508 qp = &internals->qps[i];
3509 ret = dpaa_sec_init_tx(&qp->outq);
3511 DPAA_SEC_ERR("config tx of queue pair %d", i);
3516 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3517 QMAN_FQ_FLAG_TO_DCPORTAL;
3518 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3519 /* create rx qman fq for sessions */
3520 ret = qman_create_fq(0, flags, &internals->inq[i]);
3521 if (unlikely(ret != 0)) {
3522 DPAA_SEC_ERR("sec qman_create_fq failed");
3527 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3531 DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3533 rte_free(cryptodev->security_ctx);
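/* Bus probe: allocate the cryptodev (and its private data in the
 * primary process), determine the SEC era from the device tree if it is
 * not already configured, make sure the calling thread owns a QMAN
 * portal and finish with dpaa_sec_dev_init().
 */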
3538 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3539 struct rte_dpaa_device *dpaa_dev)
3541 struct rte_cryptodev *cryptodev;
3542 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3546 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3548 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3549 if (cryptodev == NULL)
3552 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3553 cryptodev->data->dev_private = rte_zmalloc_socket(
3554 "cryptodev private structure",
3555 sizeof(struct dpaa_sec_dev_private),
3556 RTE_CACHE_LINE_SIZE,
3559 if (cryptodev->data->dev_private == NULL)
3560 rte_panic("Cannot allocate memzone for private "
3564 dpaa_dev->crypto_dev = cryptodev;
3565 cryptodev->device = &dpaa_dev->device;
3567 /* init user callbacks */
3568 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3570 /* if sec device version is not configured */
3571 if (!rta_get_sec_era()) {
3572 const struct device_node *caam_node;
3574 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3575 const uint32_t *prop = of_get_property(caam_node,
3580 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3586 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3587 retval = rte_dpaa_portal_init((void *)1);
3589 DPAA_SEC_ERR("Unable to initialize portal");
3594 /* Invoke PMD device initialization function */
3595 retval = dpaa_sec_dev_init(cryptodev);
3601 /* Error path: release whatever was allocated above */
3602 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3603 rte_free(cryptodev->data->dev_private);
3605 rte_cryptodev_pmd_release_device(cryptodev);
3611 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3613 struct rte_cryptodev *cryptodev;
3616 cryptodev = dpaa_dev->crypto_dev;
3617 if (cryptodev == NULL)
3620 ret = dpaa_sec_uninit(cryptodev);
3624 return rte_cryptodev_pmd_destroy(cryptodev);
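/* Register the PMD with the DPAA bus and the cryptodev framework. */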
3627 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3628 .drv_type = FSL_DPAA_CRYPTO,
3630 .name = "DPAA SEC PMD"
3632 .probe = cryptodev_dpaa_sec_probe,
3633 .remove = cryptodev_dpaa_sec_remove,
3636 static struct cryptodev_driver dpaa_sec_crypto_drv;
3638 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3639 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3640 cryptodev_driver_id);
3641 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);