1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
42 #include <rte_dpaa_bus.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
48 static uint8_t cryptodev_driver_id;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
67 struct dpaa_sec_op_ctx *ctx;
70 retval = rte_mempool_get(
71 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
74 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78 * Clear SG memory. There are 16 SG entries of 16 bytes each
79 * (256 bytes, i.e. four 64-byte cache lines). One call to dcbz_64()
80 * clears 64 bytes, hence it is called four times to clear all the
81 * SG entries. dpaa_sec_alloc_ctx() runs per packet, and memset() is costlier than dcbz_64().
83 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84 dcbz_64(&ctx->job.sg[i]);
86 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
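/*
 * Illustrative note (not upstream code): vtop_offset caches the
 * virtual-to-IOVA delta of this ctx allocation, so addresses inside
 * the same mempool element can be translated with plain arithmetic,
 * e.g. a hypothetical helper:
 *
 *   static inline rte_iova_t
 *   ctx_vtop(const struct dpaa_sec_op_ctx *ctx, const void *vaddr)
 *   {
 *           return (rte_iova_t)((size_t)vaddr - ctx->vtop_offset);
 *   }
 */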
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
95 const struct qm_mr_entry *msg)
97 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98 fq->fqid, msg->ern.rc, msg->ern.seqnum);
101 /* Initialize the queue with the CAAM channel as destination so that
102 * all packets enqueued on it are dispatched into CAAM.
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
108 struct qm_mcc_initfq fq_opts;
112 /* Clear FQ options */
113 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
115 flags = QMAN_INITFQ_FLAG_SCHED;
116 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117 QM_INITFQ_WE_CONTEXTB;
119 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120 fq_opts.fqd.context_b = fqid_out;
121 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122 fq_opts.fqd.dest.wq = 0;
124 fq_in->cb.ern = ern_sec_fq_handler;
126 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
128 ret = qman_init_fq(fq_in, flags, &fq_opts);
129 if (unlikely(ret != 0))
130 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
135 /* Frames are enqueued on in_fq; CAAM puts the crypto result on out_fq. */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138 struct qman_fq *fq __always_unused,
139 const struct qm_dqrr_entry *dqrr)
141 const struct qm_fd *fd;
142 struct dpaa_sec_job *job;
143 struct dpaa_sec_op_ctx *ctx;
145 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146 return qman_cb_dqrr_defer;
148 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149 return qman_cb_dqrr_consume;
152 /* sg is embedded in an op ctx,
153 * sg[0] is for output
156 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
158 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159 ctx->fd_status = fd->status;
160 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161 struct qm_sg_entry *sg_out;
163 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164 ctx->op->sym->m_src : ctx->op->sym->m_dst;
166 sg_out = &job->sg[0];
167 hw_sg_to_cpu(sg_out);
168 len = sg_out->length;
170 while (mbuf->next != NULL) {
171 len -= mbuf->data_len;
174 mbuf->data_len = len;
176 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177 dpaa_sec_op_ending(ctx);
179 return qman_cb_dqrr_consume;
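/*
 * Minimal sketch of the compound frame convention relied on above:
 *
 *   job->sg[0]  output entry (result written by SEC)
 *   job->sg[1]  input entry (extension SG list for multi-seg frames)
 *
 * Because struct dpaa_sec_job is embedded in struct dpaa_sec_op_ctx,
 * container_of() recovers the whole per-op context from the FD
 * address at constant cost.
 */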
182 /* CAAM puts its result into this queue */
184 dpaa_sec_init_tx(struct qman_fq *fq)
187 struct qm_mcc_initfq opts;
190 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191 QMAN_FQ_FLAG_DYNAMIC_FQID;
193 ret = qman_create_fq(0, flags, fq);
195 DPAA_SEC_ERR("qman_create_fq failed");
199 memset(&opts, 0, sizeof(opts));
200 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
203 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
205 fq->cb.dqrr = dqrr_out_fq_cb_rx;
206 fq->cb.ern = ern_sec_fq_handler;
208 ret = qman_init_fq(fq, 0, &opts);
210 DPAA_SEC_ERR("unable to init caam source fq!");
217 static inline int is_aead(dpaa_sec_session *ses)
219 return ((ses->cipher_alg == 0) &&
220 (ses->auth_alg == 0) &&
221 (ses->aead_alg != 0));
224 static inline int is_encode(dpaa_sec_session *ses)
226 return ses->dir == DIR_ENC;
229 static inline int is_decode(dpaa_sec_session *ses)
231 return ses->dir == DIR_DEC;
234 #ifdef RTE_LIB_SECURITY
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
238 struct alginfo authdata = {0}, cipherdata = {0};
239 struct sec_cdb *cdb = &ses->cdb;
240 struct alginfo *p_authdata = NULL;
241 int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
248 cipherdata.key = (size_t)ses->cipher_key.data;
249 cipherdata.keylen = ses->cipher_key.length;
250 cipherdata.key_enc_flags = 0;
251 cipherdata.key_type = RTA_DATA_IMM;
252 cipherdata.algtype = ses->cipher_key.alg;
253 cipherdata.algmode = ses->cipher_key.algmode;
256 authdata.key = (size_t)ses->auth_key.data;
257 authdata.keylen = ses->auth_key.length;
258 authdata.key_enc_flags = 0;
259 authdata.key_type = RTA_DATA_IMM;
260 authdata.algtype = ses->auth_key.alg;
261 authdata.algmode = ses->auth_key.algmode;
263 p_authdata = &authdata;
266 if (rta_inline_pdcp_query(authdata.algtype,
269 ses->pdcp.hfn_ovd)) {
271 (size_t)rte_dpaa_mem_vtop((void *)
272 (size_t)cipherdata.key);
273 cipherdata.key_type = RTA_DATA_PTR;
276 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
277 if (ses->dir == DIR_ENC)
278 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
279 cdb->sh_desc, 1, swap,
284 ses->pdcp.hfn_threshold,
285 &cipherdata, &authdata,
287 else if (ses->dir == DIR_DEC)
288 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
289 cdb->sh_desc, 1, swap,
294 ses->pdcp.hfn_threshold,
295 &cipherdata, &authdata,
298 if (ses->dir == DIR_ENC) {
299 if (ses->pdcp.sdap_enabled)
301 cnstr_shdsc_pdcp_sdap_u_plane_encap(
302 cdb->sh_desc, 1, swap,
307 ses->pdcp.hfn_threshold,
308 &cipherdata, p_authdata, 0);
311 cnstr_shdsc_pdcp_u_plane_encap(
312 cdb->sh_desc, 1, swap,
317 ses->pdcp.hfn_threshold,
318 &cipherdata, p_authdata, 0);
319 } else if (ses->dir == DIR_DEC) {
320 if (ses->pdcp.sdap_enabled)
322 cnstr_shdsc_pdcp_sdap_u_plane_decap(
323 cdb->sh_desc, 1, swap,
328 ses->pdcp.hfn_threshold,
329 &cipherdata, p_authdata, 0);
332 cnstr_shdsc_pdcp_u_plane_decap(
333 cdb->sh_desc, 1, swap,
338 ses->pdcp.hfn_threshold,
339 &cipherdata, p_authdata, 0);
342 return shared_desc_len;
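/*
 * Note on key placement in the CDBs built above: a key is passed
 * inline (RTA_DATA_IMM) while the shared descriptor has room for it;
 * otherwise it is referenced by IOVA (RTA_DATA_PTR) after translation
 * with rte_dpaa_mem_vtop(). The rta_inline_*query() helpers make that
 * decision from the descriptor length budget.
 */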
345 /* Prepare the IPsec protocol command block of the session. */
347 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
349 struct alginfo cipherdata = {0}, authdata = {0};
350 struct sec_cdb *cdb = &ses->cdb;
351 int32_t shared_desc_len = 0;
353 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
359 cipherdata.key = (size_t)ses->cipher_key.data;
360 cipherdata.keylen = ses->cipher_key.length;
361 cipherdata.key_enc_flags = 0;
362 cipherdata.key_type = RTA_DATA_IMM;
363 cipherdata.algtype = ses->cipher_key.alg;
364 cipherdata.algmode = ses->cipher_key.algmode;
366 if (ses->auth_key.length) {
367 authdata.key = (size_t)ses->auth_key.data;
368 authdata.keylen = ses->auth_key.length;
369 authdata.key_enc_flags = 0;
370 authdata.key_type = RTA_DATA_IMM;
371 authdata.algtype = ses->auth_key.alg;
372 authdata.algmode = ses->auth_key.algmode;
375 cdb->sh_desc[0] = cipherdata.keylen;
376 cdb->sh_desc[1] = authdata.keylen;
377 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
379 (unsigned int *)cdb->sh_desc,
380 &cdb->sh_desc[2], 2);
383 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
386 if (cdb->sh_desc[2] & 1)
387 cipherdata.key_type = RTA_DATA_IMM;
389 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
390 (void *)(size_t)cipherdata.key);
391 cipherdata.key_type = RTA_DATA_PTR;
393 if (cdb->sh_desc[2] & (1<<1))
394 authdata.key_type = RTA_DATA_IMM;
396 authdata.key = (size_t)rte_dpaa_mem_vtop(
397 (void *)(size_t)authdata.key);
398 authdata.key_type = RTA_DATA_PTR;
404 if (ses->dir == DIR_ENC) {
405 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
407 true, swap, SHR_SERIAL,
409 (uint8_t *)&ses->ip4_hdr,
410 &cipherdata, &authdata);
411 } else if (ses->dir == DIR_DEC) {
412 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
414 true, swap, SHR_SERIAL,
416 &cipherdata, &authdata);
418 return shared_desc_len;
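/*
 * For tunnel-mode encap, the prebuilt outer header template
 * (ses->ip4_hdr passed above) is embedded in the shared descriptor,
 * so SEC can prepend it without per-packet software work.
 */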
421 /* Prepare the command block (CDB) of the session. */
423 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
425 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
426 int32_t shared_desc_len = 0;
427 struct sec_cdb *cdb = &ses->cdb;
429 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
435 memset(cdb, 0, sizeof(struct sec_cdb));
438 #ifdef RTE_LIB_SECURITY
440 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
443 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
446 case DPAA_SEC_CIPHER:
447 alginfo_c.key = (size_t)ses->cipher_key.data;
448 alginfo_c.keylen = ses->cipher_key.length;
449 alginfo_c.key_enc_flags = 0;
450 alginfo_c.key_type = RTA_DATA_IMM;
451 alginfo_c.algtype = ses->cipher_key.alg;
452 alginfo_c.algmode = ses->cipher_key.algmode;
454 switch (ses->cipher_alg) {
455 case RTE_CRYPTO_CIPHER_AES_CBC:
456 case RTE_CRYPTO_CIPHER_3DES_CBC:
457 case RTE_CRYPTO_CIPHER_DES_CBC:
458 case RTE_CRYPTO_CIPHER_AES_CTR:
459 case RTE_CRYPTO_CIPHER_3DES_CTR:
460 shared_desc_len = cnstr_shdsc_blkcipher(
462 swap, SHR_NEVER, &alginfo_c,
466 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
467 shared_desc_len = cnstr_shdsc_snow_f8(
468 cdb->sh_desc, true, swap,
472 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
473 shared_desc_len = cnstr_shdsc_zuce(
474 cdb->sh_desc, true, swap,
479 DPAA_SEC_ERR("unsupported cipher alg %d",
485 alginfo_a.key = (size_t)ses->auth_key.data;
486 alginfo_a.keylen = ses->auth_key.length;
487 alginfo_a.key_enc_flags = 0;
488 alginfo_a.key_type = RTA_DATA_IMM;
489 alginfo_a.algtype = ses->auth_key.alg;
490 alginfo_a.algmode = ses->auth_key.algmode;
491 switch (ses->auth_alg) {
492 case RTE_CRYPTO_AUTH_MD5:
493 case RTE_CRYPTO_AUTH_SHA1:
494 case RTE_CRYPTO_AUTH_SHA224:
495 case RTE_CRYPTO_AUTH_SHA256:
496 case RTE_CRYPTO_AUTH_SHA384:
497 case RTE_CRYPTO_AUTH_SHA512:
498 shared_desc_len = cnstr_shdsc_hash(
500 swap, SHR_NEVER, &alginfo_a,
504 case RTE_CRYPTO_AUTH_MD5_HMAC:
505 case RTE_CRYPTO_AUTH_SHA1_HMAC:
506 case RTE_CRYPTO_AUTH_SHA224_HMAC:
507 case RTE_CRYPTO_AUTH_SHA256_HMAC:
508 case RTE_CRYPTO_AUTH_SHA384_HMAC:
509 case RTE_CRYPTO_AUTH_SHA512_HMAC:
510 shared_desc_len = cnstr_shdsc_hmac(
512 swap, SHR_NEVER, &alginfo_a,
516 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
517 shared_desc_len = cnstr_shdsc_snow_f9(
518 cdb->sh_desc, true, swap,
523 case RTE_CRYPTO_AUTH_ZUC_EIA3:
524 shared_desc_len = cnstr_shdsc_zuca(
525 cdb->sh_desc, true, swap,
530 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
531 case RTE_CRYPTO_AUTH_AES_CMAC:
532 shared_desc_len = cnstr_shdsc_aes_mac(
534 true, swap, SHR_NEVER,
540 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
544 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
545 DPAA_SEC_ERR("not supported aead alg");
548 alginfo.key = (size_t)ses->aead_key.data;
549 alginfo.keylen = ses->aead_key.length;
550 alginfo.key_enc_flags = 0;
551 alginfo.key_type = RTA_DATA_IMM;
552 alginfo.algtype = ses->aead_key.alg;
553 alginfo.algmode = ses->aead_key.algmode;
555 if (ses->dir == DIR_ENC)
556 shared_desc_len = cnstr_shdsc_gcm_encap(
557 cdb->sh_desc, true, swap, SHR_NEVER,
562 shared_desc_len = cnstr_shdsc_gcm_decap(
563 cdb->sh_desc, true, swap, SHR_NEVER,
568 case DPAA_SEC_CIPHER_HASH:
569 alginfo_c.key = (size_t)ses->cipher_key.data;
570 alginfo_c.keylen = ses->cipher_key.length;
571 alginfo_c.key_enc_flags = 0;
572 alginfo_c.key_type = RTA_DATA_IMM;
573 alginfo_c.algtype = ses->cipher_key.alg;
574 alginfo_c.algmode = ses->cipher_key.algmode;
576 alginfo_a.key = (size_t)ses->auth_key.data;
577 alginfo_a.keylen = ses->auth_key.length;
578 alginfo_a.key_enc_flags = 0;
579 alginfo_a.key_type = RTA_DATA_IMM;
580 alginfo_a.algtype = ses->auth_key.alg;
581 alginfo_a.algmode = ses->auth_key.algmode;
583 cdb->sh_desc[0] = alginfo_c.keylen;
584 cdb->sh_desc[1] = alginfo_a.keylen;
585 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
587 (unsigned int *)cdb->sh_desc,
588 &cdb->sh_desc[2], 2);
591 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
594 if (cdb->sh_desc[2] & 1)
595 alginfo_c.key_type = RTA_DATA_IMM;
597 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
598 (void *)(size_t)alginfo_c.key);
599 alginfo_c.key_type = RTA_DATA_PTR;
601 if (cdb->sh_desc[2] & (1<<1))
602 alginfo_a.key_type = RTA_DATA_IMM;
604 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
605 (void *)(size_t)alginfo_a.key);
606 alginfo_a.key_type = RTA_DATA_PTR;
611 /* auth_only_len is set to 0 here; it is
612 * overwritten in the fd for each packet.
614 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
615 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
617 ses->digest_length, ses->dir);
619 case DPAA_SEC_HASH_CIPHER:
621 DPAA_SEC_ERR("error: Unsupported session");
625 if (shared_desc_len < 0) {
626 DPAA_SEC_ERR("error in preparing command block");
627 return shared_desc_len;
630 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
631 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
632 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
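/*
 * The header words are converted to big endian here because SEC
 * (CAAM) parses the CDB in big-endian format regardless of core
 * endianness; the RTA 'swap' flag used above handles the descriptor
 * body the same way.
 */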
637 /* qp is lockless; it must be accessed by only one thread */
639 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
642 unsigned int pkts = 0;
643 int num_rx_bufs, ret;
644 struct qm_dqrr_entry *dq;
645 uint32_t vdqcr_flags = 0;
649 * For requests of up to four buffers we ask for the exact number
650 * (QM_VDQCR_EXACT). For larger requests we do not set QM_VDQCR_EXACT;
651 * the dequeue may then return up to two buffers more than requested,
652 * so we request two fewer (e.g. 10 wanted -> 8 requested, at most 10 returned).
655 vdqcr_flags = QM_VDQCR_EXACT;
656 num_rx_bufs = nb_ops;
658 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
659 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
661 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
666 const struct qm_fd *fd;
667 struct dpaa_sec_job *job;
668 struct dpaa_sec_op_ctx *ctx;
669 struct rte_crypto_op *op;
671 dq = qman_dequeue(fq);
676 /* sg is embedded in an op ctx,
677 * sg[0] is for output
680 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
682 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
683 ctx->fd_status = fd->status;
685 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
686 struct qm_sg_entry *sg_out;
688 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
689 op->sym->m_src : op->sym->m_dst;
691 sg_out = &job->sg[0];
692 hw_sg_to_cpu(sg_out);
693 len = sg_out->length;
695 while (mbuf->next != NULL) {
696 len -= mbuf->data_len;
699 mbuf->data_len = len;
701 if (!ctx->fd_status) {
702 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
704 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
705 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
709 /* report op status to sym->op and then free the ctx memory */
710 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
712 qman_dqrr_consume(fq, dq);
713 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
718 static inline struct dpaa_sec_job *
719 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
721 struct rte_crypto_sym_op *sym = op->sym;
722 struct rte_mbuf *mbuf = sym->m_src;
723 struct dpaa_sec_job *cf;
724 struct dpaa_sec_op_ctx *ctx;
725 struct qm_sg_entry *sg, *out_sg, *in_sg;
726 phys_addr_t start_addr;
727 uint8_t *old_digest, extra_segs;
728 int data_len, data_offset;
730 data_len = sym->auth.data.length;
731 data_offset = sym->auth.data.offset;
733 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
734 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
735 if ((data_len & 7) || (data_offset & 7)) {
736 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
740 data_len = data_len >> 3;
741 data_offset = data_offset >> 3;
749 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
750 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
754 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
760 old_digest = ctx->digest;
764 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
765 out_sg->length = ses->digest_length;
766 cpu_to_hw_sg(out_sg);
770 /* need to extend the input to a compound frame */
771 in_sg->extension = 1;
773 in_sg->length = data_len;
774 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
779 if (ses->iv.length) {
782 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
785 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
786 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
788 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
789 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
792 sg->length = ses->iv.length;
794 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
795 in_sg->length += sg->length;
800 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
801 sg->offset = data_offset;
803 if (data_len <= (mbuf->data_len - data_offset)) {
804 sg->length = data_len;
806 sg->length = mbuf->data_len - data_offset;
808 /* remaining i/p segs */
809 while ((data_len = data_len - sg->length) &&
810 (mbuf = mbuf->next)) {
813 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
814 if (data_len > mbuf->data_len)
815 sg->length = mbuf->data_len;
817 sg->length = data_len;
821 if (is_decode(ses)) {
822 /* Digest verification case */
825 rte_memcpy(old_digest, sym->auth.digest.data,
827 start_addr = rte_dpaa_mem_vtop(old_digest);
828 qm_sg_entry_set64(sg, start_addr);
829 sg->length = ses->digest_length;
830 in_sg->length += ses->digest_length;
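/*
 * Sketch of the frame assembled above for the digest-verify case:
 *
 *   out: sg[0] -> digest buffer written by SEC
 *   in:  sg[1] -> extension list: [F9/EIA3 IV] + data segments +
 *                 saved copy of the received digest (old_digest)
 */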
841 * |<----data_len------->|
842 * |ip_header|ah_header|icv|payload|
847 static inline struct dpaa_sec_job *
848 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
850 struct rte_crypto_sym_op *sym = op->sym;
851 struct rte_mbuf *mbuf = sym->m_src;
852 struct dpaa_sec_job *cf;
853 struct dpaa_sec_op_ctx *ctx;
854 struct qm_sg_entry *sg, *in_sg;
855 rte_iova_t start_addr;
857 int data_len, data_offset;
859 data_len = sym->auth.data.length;
860 data_offset = sym->auth.data.offset;
862 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
863 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
864 if ((data_len & 7) || (data_offset & 7)) {
865 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
869 data_len = data_len >> 3;
870 data_offset = data_offset >> 3;
873 ctx = dpaa_sec_alloc_ctx(ses, 4);
879 old_digest = ctx->digest;
881 start_addr = rte_pktmbuf_iova(mbuf);
884 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
885 sg->length = ses->digest_length;
890 /* need to extend the input to a compound frame */
891 in_sg->extension = 1;
893 in_sg->length = data_len;
894 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
897 if (ses->iv.length) {
900 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
903 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
904 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
906 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
907 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
910 sg->length = ses->iv.length;
912 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
913 in_sg->length += sg->length;
918 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
919 sg->offset = data_offset;
920 sg->length = data_len;
922 if (is_decode(ses)) {
923 /* Digest verification case */
925 /* hash result or digest; save the received digest first */
926 rte_memcpy(old_digest, sym->auth.digest.data,
928 /* let hardware verify the digest */
929 start_addr = rte_dpaa_mem_vtop(old_digest);
931 qm_sg_entry_set64(sg, start_addr);
932 sg->length = ses->digest_length;
933 in_sg->length += ses->digest_length;
942 static inline struct dpaa_sec_job *
943 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
945 struct rte_crypto_sym_op *sym = op->sym;
946 struct dpaa_sec_job *cf;
947 struct dpaa_sec_op_ctx *ctx;
948 struct qm_sg_entry *sg, *out_sg, *in_sg;
949 struct rte_mbuf *mbuf;
951 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
953 int data_len, data_offset;
955 data_len = sym->cipher.data.length;
956 data_offset = sym->cipher.data.offset;
958 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
959 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
960 if ((data_len & 7) || (data_offset & 7)) {
961 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
965 data_len = data_len >> 3;
966 data_offset = data_offset >> 3;
971 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
974 req_segs = mbuf->nb_segs * 2 + 3;
976 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
977 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
982 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
991 out_sg->extension = 1;
992 out_sg->length = data_len;
993 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
994 cpu_to_hw_sg(out_sg);
998 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
999 sg->length = mbuf->data_len - data_offset;
1000 sg->offset = data_offset;
1002 /* Successive segs */
1007 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1008 sg->length = mbuf->data_len;
1017 in_sg->extension = 1;
1019 in_sg->length = data_len + ses->iv.length;
1022 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1023 cpu_to_hw_sg(in_sg);
1026 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1027 sg->length = ses->iv.length;
1032 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1033 sg->length = mbuf->data_len - data_offset;
1034 sg->offset = data_offset;
1036 /* Successive segs */
1041 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1042 sg->length = mbuf->data_len;
1051 static inline struct dpaa_sec_job *
1052 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1054 struct rte_crypto_sym_op *sym = op->sym;
1055 struct dpaa_sec_job *cf;
1056 struct dpaa_sec_op_ctx *ctx;
1057 struct qm_sg_entry *sg;
1058 rte_iova_t src_start_addr, dst_start_addr;
1059 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1061 int data_len, data_offset;
1063 data_len = sym->cipher.data.length;
1064 data_offset = sym->cipher.data.offset;
1066 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1067 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1068 if ((data_len & 7) || (data_offset & 7)) {
1069 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1073 data_len = data_len >> 3;
1074 data_offset = data_offset >> 3;
1077 ctx = dpaa_sec_alloc_ctx(ses, 4);
1084 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1087 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1089 dst_start_addr = src_start_addr;
1093 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1094 sg->length = data_len + ses->iv.length;
1100 /* need to extend the input to a compound frame */
1103 sg->length = data_len + ses->iv.length;
1104 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1108 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1109 sg->length = ses->iv.length;
1113 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1114 sg->length = data_len;
1121 static inline struct dpaa_sec_job *
1122 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1124 struct rte_crypto_sym_op *sym = op->sym;
1125 struct dpaa_sec_job *cf;
1126 struct dpaa_sec_op_ctx *ctx;
1127 struct qm_sg_entry *sg, *out_sg, *in_sg;
1128 struct rte_mbuf *mbuf;
1130 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1135 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1138 req_segs = mbuf->nb_segs * 2 + 4;
1141 if (ses->auth_only_len)
1144 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1145 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1150 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1157 rte_prefetch0(cf->sg);
1160 out_sg = &cf->sg[0];
1161 out_sg->extension = 1;
1163 out_sg->length = sym->aead.data.length + ses->digest_length;
1165 out_sg->length = sym->aead.data.length;
1167 /* output sg entries */
1169 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1170 cpu_to_hw_sg(out_sg);
1173 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1174 sg->length = mbuf->data_len - sym->aead.data.offset;
1175 sg->offset = sym->aead.data.offset;
1177 /* Successive segs */
1182 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1183 sg->length = mbuf->data_len;
1186 sg->length -= ses->digest_length;
1188 if (is_encode(ses)) {
1190 /* set auth output */
1192 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1193 sg->length = ses->digest_length;
1201 in_sg->extension = 1;
1204 in_sg->length = ses->iv.length + sym->aead.data.length
1205 + ses->auth_only_len;
1207 in_sg->length = ses->iv.length + sym->aead.data.length
1208 + ses->auth_only_len + ses->digest_length;
1210 /* input sg entries */
1212 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1213 cpu_to_hw_sg(in_sg);
1216 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1217 sg->length = ses->iv.length;
1220 /* 2nd seg auth only */
1221 if (ses->auth_only_len) {
1223 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1224 sg->length = ses->auth_only_len;
1230 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1231 sg->length = mbuf->data_len - sym->aead.data.offset;
1232 sg->offset = sym->aead.data.offset;
1234 /* Successive segs */
1239 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1240 sg->length = mbuf->data_len;
1244 if (is_decode(ses)) {
1247 memcpy(ctx->digest, sym->aead.digest.data,
1248 ses->digest_length);
1249 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1250 sg->length = ses->digest_length;
1258 static inline struct dpaa_sec_job *
1259 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1261 struct rte_crypto_sym_op *sym = op->sym;
1262 struct dpaa_sec_job *cf;
1263 struct dpaa_sec_op_ctx *ctx;
1264 struct qm_sg_entry *sg;
1265 uint32_t length = 0;
1266 rte_iova_t src_start_addr, dst_start_addr;
1267 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1270 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1273 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1275 dst_start_addr = src_start_addr;
1277 ctx = dpaa_sec_alloc_ctx(ses, 7);
1285 rte_prefetch0(cf->sg);
1287 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1288 if (is_encode(ses)) {
1289 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1290 sg->length = ses->iv.length;
1291 length += sg->length;
1295 if (ses->auth_only_len) {
1296 qm_sg_entry_set64(sg,
1297 rte_dpaa_mem_vtop(sym->aead.aad.data));
1298 sg->length = ses->auth_only_len;
1299 length += sg->length;
1303 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1304 sg->length = sym->aead.data.length;
1305 length += sg->length;
1309 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1310 sg->length = ses->iv.length;
1311 length += sg->length;
1315 if (ses->auth_only_len) {
1316 qm_sg_entry_set64(sg,
1317 rte_dpaa_mem_vtop(sym->aead.aad.data));
1318 sg->length = ses->auth_only_len;
1319 length += sg->length;
1323 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1324 sg->length = sym->aead.data.length;
1325 length += sg->length;
1328 memcpy(ctx->digest, sym->aead.digest.data,
1329 ses->digest_length);
1332 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1333 sg->length = ses->digest_length;
1334 length += sg->length;
1338 /* input compound frame */
1339 cf->sg[1].length = length;
1340 cf->sg[1].extension = 1;
1341 cf->sg[1].final = 1;
1342 cpu_to_hw_sg(&cf->sg[1]);
1346 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1347 qm_sg_entry_set64(sg,
1348 dst_start_addr + sym->aead.data.offset);
1349 sg->length = sym->aead.data.length;
1350 length = sg->length;
1351 if (is_encode(ses)) {
1353 /* set auth output */
1355 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1356 sg->length = ses->digest_length;
1357 length += sg->length;
1362 /* output compound frame */
1363 cf->sg[0].length = length;
1364 cf->sg[0].extension = 1;
1365 cpu_to_hw_sg(&cf->sg[0]);
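/*
 * Resulting AES-GCM compound frame, as assembled above:
 *
 *   sg[0] (output, extension): ciphertext [+ digest when encrypting]
 *   sg[1] (input, extension):  IV + [AAD] + payload
 *
 * On decrypt, the received digest is appended to the input list so
 * that SEC verifies it in-line.
 */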
1370 static inline struct dpaa_sec_job *
1371 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1373 struct rte_crypto_sym_op *sym = op->sym;
1374 struct dpaa_sec_job *cf;
1375 struct dpaa_sec_op_ctx *ctx;
1376 struct qm_sg_entry *sg, *out_sg, *in_sg;
1377 struct rte_mbuf *mbuf;
1379 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1384 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1387 req_segs = mbuf->nb_segs * 2 + 4;
1390 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1391 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1396 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1403 rte_prefetch0(cf->sg);
1406 out_sg = &cf->sg[0];
1407 out_sg->extension = 1;
1409 out_sg->length = sym->auth.data.length + ses->digest_length;
1411 out_sg->length = sym->auth.data.length;
1413 /* output sg entries */
1415 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1416 cpu_to_hw_sg(out_sg);
1419 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1420 sg->length = mbuf->data_len - sym->auth.data.offset;
1421 sg->offset = sym->auth.data.offset;
1423 /* Successive segs */
1428 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1429 sg->length = mbuf->data_len;
1432 sg->length -= ses->digest_length;
1434 if (is_encode(ses)) {
1436 /* set auth output */
1438 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1439 sg->length = ses->digest_length;
1447 in_sg->extension = 1;
1450 in_sg->length = ses->iv.length + sym->auth.data.length;
1452 in_sg->length = ses->iv.length + sym->auth.data.length
1453 + ses->digest_length;
1455 /* input sg entries */
1457 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1458 cpu_to_hw_sg(in_sg);
1461 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1462 sg->length = ses->iv.length;
1467 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1468 sg->length = mbuf->data_len - sym->auth.data.offset;
1469 sg->offset = sym->auth.data.offset;
1471 /* Successive segs */
1476 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1477 sg->length = mbuf->data_len;
1481 sg->length -= ses->digest_length;
1482 if (is_decode(ses)) {
1485 memcpy(ctx->digest, sym->auth.digest.data,
1486 ses->digest_length);
1487 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1488 sg->length = ses->digest_length;
1496 static inline struct dpaa_sec_job *
1497 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1499 struct rte_crypto_sym_op *sym = op->sym;
1500 struct dpaa_sec_job *cf;
1501 struct dpaa_sec_op_ctx *ctx;
1502 struct qm_sg_entry *sg;
1503 rte_iova_t src_start_addr, dst_start_addr;
1504 uint32_t length = 0;
1505 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1508 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1510 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1512 dst_start_addr = src_start_addr;
1514 ctx = dpaa_sec_alloc_ctx(ses, 7);
1522 rte_prefetch0(cf->sg);
1524 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1525 if (is_encode(ses)) {
1526 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1527 sg->length = ses->iv.length;
1528 length += sg->length;
1532 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1533 sg->length = sym->auth.data.length;
1534 length += sg->length;
1538 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1539 sg->length = ses->iv.length;
1540 length += sg->length;
1545 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1546 sg->length = sym->auth.data.length;
1547 length += sg->length;
1550 memcpy(ctx->digest, sym->auth.digest.data,
1551 ses->digest_length);
1554 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1555 sg->length = ses->digest_length;
1556 length += sg->length;
1560 /* input compound frame */
1561 cf->sg[1].length = length;
1562 cf->sg[1].extension = 1;
1563 cf->sg[1].final = 1;
1564 cpu_to_hw_sg(&cf->sg[1]);
1568 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1569 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1570 sg->length = sym->cipher.data.length;
1571 length = sg->length;
1572 if (is_encode(ses)) {
1574 /* set auth output */
1576 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1577 sg->length = ses->digest_length;
1578 length += sg->length;
1583 /* output compound frame */
1584 cf->sg[0].length = length;
1585 cf->sg[0].extension = 1;
1586 cpu_to_hw_sg(&cf->sg[0]);
1591 #ifdef RTE_LIB_SECURITY
1592 static inline struct dpaa_sec_job *
1593 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1595 struct rte_crypto_sym_op *sym = op->sym;
1596 struct dpaa_sec_job *cf;
1597 struct dpaa_sec_op_ctx *ctx;
1598 struct qm_sg_entry *sg;
1599 phys_addr_t src_start_addr, dst_start_addr;
1601 ctx = dpaa_sec_alloc_ctx(ses, 2);
1607 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1610 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1612 dst_start_addr = src_start_addr;
1616 qm_sg_entry_set64(sg, src_start_addr);
1617 sg->length = sym->m_src->pkt_len;
1621 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1624 qm_sg_entry_set64(sg, dst_start_addr);
1625 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
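/*
 * The output entry spans the rest of the buffer (buf_len - data_off)
 * rather than pkt_len: protocol offload (IPsec encap/decap) may
 * change the frame size, and SEC reports the final length in the FD.
 */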
1631 static inline struct dpaa_sec_job *
1632 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1634 struct rte_crypto_sym_op *sym = op->sym;
1635 struct dpaa_sec_job *cf;
1636 struct dpaa_sec_op_ctx *ctx;
1637 struct qm_sg_entry *sg, *out_sg, *in_sg;
1638 struct rte_mbuf *mbuf;
1640 uint32_t in_len = 0, out_len = 0;
1647 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1648 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1649 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1654 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1660 out_sg = &cf->sg[0];
1661 out_sg->extension = 1;
1662 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1666 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1669 /* Successive segs */
1670 while (mbuf->next) {
1671 sg->length = mbuf->data_len;
1672 out_len += sg->length;
1676 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1679 sg->length = mbuf->buf_len - mbuf->data_off;
1680 out_len += sg->length;
1684 out_sg->length = out_len;
1685 cpu_to_hw_sg(out_sg);
1690 in_sg->extension = 1;
1692 in_len = mbuf->data_len;
1695 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1698 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1699 sg->length = mbuf->data_len;
1702 /* Successive segs */
1707 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1708 sg->length = mbuf->data_len;
1710 in_len += sg->length;
1716 in_sg->length = in_len;
1717 cpu_to_hw_sg(in_sg);
1719 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
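/*
 * The L4 packet type is cleared because after protocol (IPsec)
 * processing the original L4 classification of the mbuf no longer
 * applies.
 */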
1726 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1729 /* Function to transmit the frames to the given device and queue pair */
1731 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1732 uint16_t num_tx = 0;
1733 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1734 uint32_t frames_to_send;
1735 struct rte_crypto_op *op;
1736 struct dpaa_sec_job *cf;
1737 dpaa_sec_session *ses;
1738 uint16_t auth_hdr_len, auth_tail_len;
1739 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1740 struct qman_fq *inq[DPAA_SEC_BURST];
1742 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1743 if (rte_dpaa_portal_init((void *)0)) {
1744 DPAA_SEC_ERR("Failure in affining portal");
1750 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1751 DPAA_SEC_BURST : nb_ops;
1752 for (loop = 0; loop < frames_to_send; loop++) {
1754 if (*dpaa_seqn(op->sym->m_src) != 0) {
1755 index = *dpaa_seqn(op->sym->m_src) - 1;
1756 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1757 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1758 flags[loop] = ((index & 0x0f) << 8);
1759 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1760 DPAA_PER_LCORE_DQRR_SIZE--;
1761 DPAA_PER_LCORE_DQRR_HELD &=
1766 switch (op->sess_type) {
1767 case RTE_CRYPTO_OP_WITH_SESSION:
1768 ses = (dpaa_sec_session *)
1769 get_sym_session_private_data(
1771 cryptodev_driver_id);
1773 #ifdef RTE_LIB_SECURITY
1774 case RTE_CRYPTO_OP_SECURITY_SESSION:
1775 ses = (dpaa_sec_session *)
1776 get_sec_session_private_data(
1777 op->sym->sec_session);
1782 "sessionless crypto op not supported");
1783 frames_to_send = loop;
1789 DPAA_SEC_DP_ERR("session not available");
1790 frames_to_send = loop;
1795 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1796 if (dpaa_sec_attach_sess_q(qp, ses)) {
1797 frames_to_send = loop;
1801 } else if (unlikely(ses->qp[rte_lcore_id() %
1802 MAX_DPAA_CORES] != qp)) {
1803 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1805 ses->qp[rte_lcore_id() %
1806 MAX_DPAA_CORES], qp);
1807 frames_to_send = loop;
1812 auth_hdr_len = op->sym->auth.data.length -
1813 op->sym->cipher.data.length;
1816 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1817 ((op->sym->m_dst == NULL) ||
1818 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1819 switch (ses->ctxt) {
1820 #ifdef RTE_LIB_SECURITY
1822 case DPAA_SEC_IPSEC:
1823 cf = build_proto(op, ses);
1827 cf = build_auth_only(op, ses);
1829 case DPAA_SEC_CIPHER:
1830 cf = build_cipher_only(op, ses);
1833 cf = build_cipher_auth_gcm(op, ses);
1834 auth_hdr_len = ses->auth_only_len;
1836 case DPAA_SEC_CIPHER_HASH:
1838 op->sym->cipher.data.offset
1839 - op->sym->auth.data.offset;
1841 op->sym->auth.data.length
1842 - op->sym->cipher.data.length
1844 cf = build_cipher_auth(op, ses);
1847 DPAA_SEC_DP_ERR("unsupported op");
1848 frames_to_send = loop;
1853 switch (ses->ctxt) {
1854 #ifdef RTE_LIB_SECURITY
1856 case DPAA_SEC_IPSEC:
1857 cf = build_proto_sg(op, ses);
1861 cf = build_auth_only_sg(op, ses);
1863 case DPAA_SEC_CIPHER:
1864 cf = build_cipher_only_sg(op, ses);
1867 cf = build_cipher_auth_gcm_sg(op, ses);
1868 auth_hdr_len = ses->auth_only_len;
1870 case DPAA_SEC_CIPHER_HASH:
1872 op->sym->cipher.data.offset
1873 - op->sym->auth.data.offset;
1875 op->sym->auth.data.length
1876 - op->sym->cipher.data.length
1878 cf = build_cipher_auth_sg(op, ses);
1881 DPAA_SEC_DP_ERR("unsupported op");
1882 frames_to_send = loop;
1887 if (unlikely(!cf)) {
1888 frames_to_send = loop;
1894 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1895 fd->opaque_addr = 0;
1897 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1898 fd->_format1 = qm_fd_compound;
1899 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1901 /* auth_only_len is set as 0 in the descriptor and is overwritten
1902 * here in fd.cmd, which updates the DPOVRD register.
1905 if (auth_hdr_len || auth_tail_len) {
1906 fd->cmd = 0x80000000;
1907 fd->cmd |= ((auth_tail_len << 16) | auth_hdr_len);
1911 #ifdef RTE_LIB_SECURITY
1912 /* In case of PDCP, per packet HFN is stored in
1913 * mbuf priv after sym_op.
1915 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1916 fd->cmd = 0x80000000 |
1917 *((uint32_t *)((uint8_t *)op +
1918 ses->pdcp.hfn_ovd_offset));
1919 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1920 *((uint32_t *)((uint8_t *)op +
1921 ses->pdcp.hfn_ovd_offset)),
1928 while (loop < frames_to_send) {
1929 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1930 &flags[loop], frames_to_send - loop);
1932 nb_ops -= frames_to_send;
1933 num_tx += frames_to_send;
1936 dpaa_qp->tx_pkts += num_tx;
1937 dpaa_qp->tx_errs += nb_ops - num_tx;
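/*
 * Typical datapath usage from an application, as a sketch (error
 * handling elided); these calls reach dpaa_sec_enqueue_burst() and
 * dpaa_sec_dequeue_burst() through the cryptodev burst hooks:
 *
 *   struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *   uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
 *                                                ops, nb_ops);
 *   uint16_t n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                                ops, DPAA_SEC_BURST);
 */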
1943 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1947 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1949 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1950 if (rte_dpaa_portal_init((void *)0)) {
1951 DPAA_SEC_ERR("Failure in affining portal");
1956 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1958 dpaa_qp->rx_pkts += num_rx;
1959 dpaa_qp->rx_errs += nb_ops - num_rx;
1961 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1966 /** Release queue pair */
1968 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1971 struct dpaa_sec_dev_private *internals;
1972 struct dpaa_sec_qp *qp = NULL;
1974 PMD_INIT_FUNC_TRACE();
1976 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1978 internals = dev->data->dev_private;
1979 if (qp_id >= internals->max_nb_queue_pairs) {
1980 DPAA_SEC_ERR("Max supported qpid %d",
1981 internals->max_nb_queue_pairs);
1985 qp = &internals->qps[qp_id];
1986 rte_mempool_free(qp->ctx_pool);
1987 qp->internals = NULL;
1988 dev->data->queue_pairs[qp_id] = NULL;
1993 /** Setup a queue pair */
1995 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1996 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1997 __rte_unused int socket_id)
1999 struct dpaa_sec_dev_private *internals;
2000 struct dpaa_sec_qp *qp = NULL;
2003 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2005 internals = dev->data->dev_private;
2006 if (qp_id >= internals->max_nb_queue_pairs) {
2007 DPAA_SEC_ERR("Max supported qpid %d",
2008 internals->max_nb_queue_pairs);
2012 qp = &internals->qps[qp_id];
2013 qp->internals = internals;
2014 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2015 dev->data->dev_id, qp_id);
2016 if (!qp->ctx_pool) {
2017 qp->ctx_pool = rte_mempool_create((const char *)str,
2020 CTX_POOL_CACHE_SIZE, 0,
2021 NULL, NULL, NULL, NULL,
2023 if (!qp->ctx_pool) {
2024 DPAA_SEC_ERR("%s create failed\n", str);
2028 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2029 dev->data->dev_id, qp_id);
2030 dev->data->queue_pairs[qp_id] = qp;
2035 /** Returns the size of session structure */
2037 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2039 PMD_INIT_FUNC_TRACE();
2041 return sizeof(dpaa_sec_session);
2045 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2046 struct rte_crypto_sym_xform *xform,
2047 dpaa_sec_session *session)
2049 session->ctxt = DPAA_SEC_CIPHER;
2050 session->cipher_alg = xform->cipher.algo;
2051 session->iv.length = xform->cipher.iv.length;
2052 session->iv.offset = xform->cipher.iv.offset;
2053 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2054 RTE_CACHE_LINE_SIZE);
2055 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2056 DPAA_SEC_ERR("No Memory for cipher key");
2059 session->cipher_key.length = xform->cipher.key.length;
2061 memcpy(session->cipher_key.data, xform->cipher.key.data,
2062 xform->cipher.key.length);
2063 switch (xform->cipher.algo) {
2064 case RTE_CRYPTO_CIPHER_AES_CBC:
2065 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2066 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2068 case RTE_CRYPTO_CIPHER_DES_CBC:
2069 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2070 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2072 case RTE_CRYPTO_CIPHER_3DES_CBC:
2073 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2074 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2076 case RTE_CRYPTO_CIPHER_AES_CTR:
2077 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2078 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2080 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2081 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2083 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2084 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2087 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2088 xform->cipher.algo);
2091 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2098 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2099 struct rte_crypto_sym_xform *xform,
2100 dpaa_sec_session *session)
2102 session->ctxt = DPAA_SEC_AUTH;
2103 session->auth_alg = xform->auth.algo;
2104 session->auth_key.length = xform->auth.key.length;
2105 if (xform->auth.key.length) {
2106 session->auth_key.data =
2107 rte_zmalloc(NULL, xform->auth.key.length,
2108 RTE_CACHE_LINE_SIZE);
2109 if (session->auth_key.data == NULL) {
2110 DPAA_SEC_ERR("No Memory for auth key");
2113 memcpy(session->auth_key.data, xform->auth.key.data,
2114 xform->auth.key.length);
2117 session->digest_length = xform->auth.digest_length;
2118 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2119 session->iv.offset = xform->auth.iv.offset;
2120 session->iv.length = xform->auth.iv.length;
2123 switch (xform->auth.algo) {
2124 case RTE_CRYPTO_AUTH_SHA1:
2125 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2126 session->auth_key.algmode = OP_ALG_AAI_HASH;
2128 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2129 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2130 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2132 case RTE_CRYPTO_AUTH_MD5:
2133 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2134 session->auth_key.algmode = OP_ALG_AAI_HASH;
2136 case RTE_CRYPTO_AUTH_MD5_HMAC:
2137 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2138 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2140 case RTE_CRYPTO_AUTH_SHA224:
2141 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2142 session->auth_key.algmode = OP_ALG_AAI_HASH;
2144 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2145 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2146 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2148 case RTE_CRYPTO_AUTH_SHA256:
2149 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2150 session->auth_key.algmode = OP_ALG_AAI_HASH;
2152 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2153 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2154 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2156 case RTE_CRYPTO_AUTH_SHA384:
2157 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2158 session->auth_key.algmode = OP_ALG_AAI_HASH;
2160 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2161 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2162 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2164 case RTE_CRYPTO_AUTH_SHA512:
2165 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2166 session->auth_key.algmode = OP_ALG_AAI_HASH;
2168 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2169 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2170 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2172 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2173 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2174 session->auth_key.algmode = OP_ALG_AAI_F9;
2176 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2177 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2178 session->auth_key.algmode = OP_ALG_AAI_F9;
2180 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2181 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2182 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2184 case RTE_CRYPTO_AUTH_AES_CMAC:
2185 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2186 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2189 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2194 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2201 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2202 struct rte_crypto_sym_xform *xform,
2203 dpaa_sec_session *session)
2206 struct rte_crypto_cipher_xform *cipher_xform;
2207 struct rte_crypto_auth_xform *auth_xform;
2209 session->ctxt = DPAA_SEC_CIPHER_HASH;
2210 if (session->auth_cipher_text) {
2211 cipher_xform = &xform->cipher;
2212 auth_xform = &xform->next->auth;
2214 cipher_xform = &xform->next->cipher;
2215 auth_xform = &xform->auth;
2218 /* Set IV parameters */
2219 session->iv.offset = cipher_xform->iv.offset;
2220 session->iv.length = cipher_xform->iv.length;
2222 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2223 RTE_CACHE_LINE_SIZE);
2224 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2225 DPAA_SEC_ERR("No Memory for cipher key");
2228 session->cipher_key.length = cipher_xform->key.length;
2229 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2230 RTE_CACHE_LINE_SIZE);
2231 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2232 DPAA_SEC_ERR("No Memory for auth key");
2235 session->auth_key.length = auth_xform->key.length;
2236 memcpy(session->cipher_key.data, cipher_xform->key.data,
2237 cipher_xform->key.length);
2238 memcpy(session->auth_key.data, auth_xform->key.data,
2239 auth_xform->key.length);
2241 session->digest_length = auth_xform->digest_length;
2242 session->auth_alg = auth_xform->algo;
2244 switch (auth_xform->algo) {
2245 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2246 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2247 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2249 case RTE_CRYPTO_AUTH_MD5_HMAC:
2250 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2251 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2253 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2254 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2255 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2257 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2258 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2259 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2261 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2262 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2263 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2265 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2266 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2267 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2269 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2270 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2271 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2273 case RTE_CRYPTO_AUTH_AES_CMAC:
2274 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2275 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2278 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2283 session->cipher_alg = cipher_xform->algo;
2285 switch (cipher_xform->algo) {
2286 case RTE_CRYPTO_CIPHER_AES_CBC:
2287 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2288 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2290 case RTE_CRYPTO_CIPHER_DES_CBC:
2291 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2292 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2294 case RTE_CRYPTO_CIPHER_3DES_CBC:
2295 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2296 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2298 case RTE_CRYPTO_CIPHER_AES_CTR:
2299 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2300 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2303 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2304 cipher_xform->algo);
2307 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2313 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2314 struct rte_crypto_sym_xform *xform,
2315 dpaa_sec_session *session)
2317 session->aead_alg = xform->aead.algo;
2318 session->ctxt = DPAA_SEC_AEAD;
2319 session->iv.length = xform->aead.iv.length;
2320 session->iv.offset = xform->aead.iv.offset;
2321 session->auth_only_len = xform->aead.aad_length;
2322 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2323 RTE_CACHE_LINE_SIZE);
2324 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2325 DPAA_SEC_ERR("No Memory for aead key\n");
2328 session->aead_key.length = xform->aead.key.length;
2329 session->digest_length = xform->aead.digest_length;
2331 memcpy(session->aead_key.data, xform->aead.key.data,
2332 xform->aead.key.length);
2334 switch (session->aead_alg) {
2335 case RTE_CRYPTO_AEAD_AES_GCM:
2336 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2337 session->aead_key.algmode = OP_ALG_AAI_GCM;
2340 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2344 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2350 static struct qman_fq *
2351 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2355 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2356 if (qi->inq_attach[i] == 0) {
2357 qi->inq_attach[i] = 1;
2361 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2367 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2371 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2372 if (&qi->inq[i] == fq) {
2373 if (qman_retire_fq(fq, NULL) != 0)
2374 DPAA_SEC_DEBUG("Queue is not retired\n");
2376 qi->inq_attach[i] = 0;
2384 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2388 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2389 ret = dpaa_sec_prep_cdb(sess);
2391 DPAA_SEC_ERR("Unable to prepare sec cdb");
2394 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2395 ret = rte_dpaa_portal_init((void *)0);
2397 DPAA_SEC_ERR("Failure in affining portal");
2401 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2402 rte_dpaa_mem_vtop(&sess->cdb),
2403 qman_fq_fqid(&qp->outq));
2405 DPAA_SEC_ERR("Unable to init sec queue");
2411 free_session_data(dpaa_sec_session *s)
2414 rte_free(s->aead_key.data);
2416 rte_free(s->auth_key.data);
2417 rte_free(s->cipher_key.data);
2419 memset(s, 0, sizeof(dpaa_sec_session));
2423 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2424 struct rte_crypto_sym_xform *xform, void *sess)
2426 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2427 dpaa_sec_session *session = sess;
2431 PMD_INIT_FUNC_TRACE();
2433 if (unlikely(sess == NULL)) {
2434 DPAA_SEC_ERR("invalid session struct");
2437 memset(session, 0, sizeof(dpaa_sec_session));
2439 /* Default IV length = 0 */
2440 session->iv.length = 0;
2443 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2444 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2445 ret = dpaa_sec_cipher_init(dev, xform, session);
2447 /* Authentication Only */
2448 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2449 xform->next == NULL) {
2450 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2451 session->ctxt = DPAA_SEC_AUTH;
2452 ret = dpaa_sec_auth_init(dev, xform, session);
2454 /* Cipher then Authenticate */
2455 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2456 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2457 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2458 session->auth_cipher_text = 1;
2459 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2460 ret = dpaa_sec_auth_init(dev, xform, session);
2461 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2462 ret = dpaa_sec_cipher_init(dev, xform, session);
2464 ret = dpaa_sec_chain_init(dev, xform, session);
2466 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2469 /* Authenticate then Cipher */
2470 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2471 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2472 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2473 session->auth_cipher_text = 0;
2474 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2475 ret = dpaa_sec_cipher_init(dev, xform, session);
2476 else if (xform->next->cipher.algo
2477 == RTE_CRYPTO_CIPHER_NULL)
2478 ret = dpaa_sec_auth_init(dev, xform, session);
2480 ret = dpaa_sec_chain_init(dev, xform, session);
2482 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2486 /* AEAD operation for AES-GCM-style algorithms */
2487 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2488 xform->next == NULL) {
2489 ret = dpaa_sec_aead_init(dev, xform, session);
2492 DPAA_SEC_ERR("Invalid crypto type");
2496 DPAA_SEC_ERR("unable to init session");
2500 rte_spinlock_lock(&internals->lock);
2501 for (i = 0; i < MAX_DPAA_CORES; i++) {
2502 session->inq[i] = dpaa_sec_attach_rxq(internals);
2503 if (session->inq[i] == NULL) {
2504 DPAA_SEC_ERR("unable to attach sec queue");
2505 rte_spinlock_unlock(&internals->lock);
2510 rte_spinlock_unlock(&internals->lock);
2515 free_session_data(session);
2520 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2521 struct rte_crypto_sym_xform *xform,
2522 struct rte_cryptodev_sym_session *sess,
2523 struct rte_mempool *mempool)
2525 void *sess_private_data;
2528 PMD_INIT_FUNC_TRACE();
2530 if (rte_mempool_get(mempool, &sess_private_data)) {
2531 DPAA_SEC_ERR("Couldn't get object from session mempool");
2535 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2537 DPAA_SEC_ERR("failed to configure session parameters");
2539 /* Return session to mempool */
2540 rte_mempool_put(mempool, sess_private_data);
2544 set_sym_session_private_data(sess, dev->driver_id,
2552 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2554 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2555 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2558 for (i = 0; i < MAX_DPAA_CORES; i++) {
2560 dpaa_sec_detach_rxq(qi, s->inq[i]);
2564 free_session_data(s);
2565 rte_mempool_put(sess_mp, (void *)s);
2568 /** Clear the memory of session so it doesn't leave key material behind */
2570 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2571 struct rte_cryptodev_sym_session *sess)
2573 PMD_INIT_FUNC_TRACE();
2574 uint8_t index = dev->driver_id;
2575 void *sess_priv = get_sym_session_private_data(sess, index);
2576 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2579 free_session_memory(dev, s);
2580 set_sym_session_private_data(sess, index, NULL);
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			struct rte_security_ipsec_xform *ipsec_xform,
			dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
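/*
 * Illustrative AEAD xform (not driver code): for AES-GCM based IPsec the
 * digest_length chosen by the application selects OP_PCL_IPSEC_AES_GCM8/
 * 12/16 above, while the 4-byte salt comes from the IPsec xform rather
 * than the key. gcm_key and IV_OFFSET are placeholders.
 *
 *	struct rte_crypto_sym_xform aead_x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = gcm_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 8 },
 *			.digest_length = 16,
 *		},
 *	};
 */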
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
			struct rte_crypto_auth_xform *auth_xform,
			struct rte_security_ipsec_xform *ipsec_xform,
			dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}
	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
				"Using a truncated sha256-hmac digest is non-standard; "
				"it will not work with lookaside protocol offload");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}
	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
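/*
 * For reference: the descriptor algorithm selectors above encode the
 * standard truncated ICV sizes used by IPsec (RFC 2404/4868), e.g.
 * HMAC_SHA1_96 -> 12 bytes, HMAC_SHA2_256_128 -> 16 bytes,
 * HMAC_SHA2_384_192 -> 24 bytes and HMAC_SHA2_512_256 -> 32 bytes.
 * The digest_length in the auth xform should match these values for
 * lookaside protocol offload to interoperate.
 */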
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;
	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	}
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return ret;
}
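/*
 * Usage sketch (illustrative only): an egress ESP tunnel session for this
 * path is created through the rte_security API, roughly as below. All
 * right-hand values are application-owned placeholders, and the exact
 * rte_security_session_create() argument list varies across DPDK
 * releases, hence the elided mempool arguments.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead_x,
 *	};
 *	sec_sess = rte_security_session_create(sec_ctx, &conf, ...);
 */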
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;
	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			ret = -EINVAL;
			goto out;
		}
	}
	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;

out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}
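/*
 * Usage sketch (illustrative only): a PDCP data-plane session for this
 * path would be configured roughly as below; the bearer and HFN values
 * are placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_x,
 *	};
 */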
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);
	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	if (dev == NULL)
		return -ENODEV;
	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
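/*
 * Usage sketch (illustrative): applications reach this op through the
 * generic API rather than calling it directly; dev_id is a placeholder.
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	// info.max_nb_queue_pairs, info.capabilities, ... are now valid
 */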
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	uint8_t index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
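/*
 * Note on the atomic path above: the portal DQRR ring holds 16 entries of
 * 64 bytes each, so the ring index can be recovered from the entry
 * address alone:
 *
 *	index = ((uintptr_t)dqrr >> 6) & (16 - 1);
 *
 * impl_opaque and dpaa_seqn() carry index + 1 so that 0 can be reserved
 * to mean "no DQRR entry held"; the deferred entry is consumed only once
 * the application finishes processing the event.
 */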
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
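/*
 * Usage sketch (illustrative): the DPAA event device glue calls this once
 * per queue pair when binding crypto completions to an event queue;
 * ev_queue_id, qp_id and ch_id are placeholders owned by that driver.
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_queue_id,
 *		.sched_type = RTE_SCHED_TYPE_PARALLEL,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	ret = dpaa_sec_eventq_attach(cdev, qp_id, ch_id, &ev);
 */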
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());
	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};
static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);