/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
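
/*
 * Address translation helpers: the SEC block operates on IOVAs, so virtual
 * addresses are translated through the dpaax IOVA table (the fast path),
 * with the slower EAL memseg lookup as the fallback on a miss.
 */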
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms) {
		/* Cache the translation for later fast-path lookups. */
		dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	}
	return (size_t)NULL;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	void *va;

	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va))
		return va;

	return rte_mem_iova2virt(paddr);
}
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with the CAAM channel as destination so that
 * all the packets on this queue are dispatched into CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	/* Context A carries the shared descriptor address; context B names
	 * the FQ on which CAAM will enqueue the result.
	 */
	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Frames are enqueued on in_fq and CAAM puts the crypto results on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		/* Fix up the mbuf chain to the post-processing length
		 * reported by hardware in the output SG entry.
		 */
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* The CAAM result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret))
		DPAA_SEC_ERR("unable to init caam source fq!");

	return ret;
}
static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
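
/*
 * The dpaa_sec_prep_*_cdb() helpers below build the session's command
 * descriptor block (the shared descriptor that SEC runs for every frame of
 * the session) with the RTA cnstr_shdsc_*() constructors, and store it in
 * ses->cdb.
 */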
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;

		cdb->sh_desc[1] = authdata.keylen;
	}

	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);
	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}

	/* Bits 0/1 of sh_desc[2] report whether the cipher/auth key fits
	 * inline in the shared descriptor; otherwise pass it by pointer.
	 */
	if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
		cipherdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
		authdata.key =
			(size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn, ses->pdcp.sn_size,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn, ses->pdcp.sn_size,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size, ses->pdcp.hfn,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.sn_size, ses->pdcp.hfn,
					ses->pdcp.bearer, ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, p_authdata, 0);
	}

	return shared_desc_len;
}
/* prepare the IPsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	authdata.key = (size_t)ses->auth_key.data;
	authdata.keylen = ses->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	authdata.algtype = ses->auth_key.alg;
	authdata.algmode = ses->auth_key.algmode;

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	cdb->sh_desc[0] = 0;
	cdb->sh_desc[1] = 0;
	cdb->sh_desc[2] = 0;
	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
/* prepare the command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
						cdb->sh_desc, true,
						swap, SHR_NEVER, &alginfo_a,
						!ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						ses->dir,
						ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
						cdb->sh_desc, true, swap,
						&alginfo_a,
						ses->dir,
						ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		/* auth_only_len is set to 0 here; it is overwritten in the
		 * fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
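
/*
 * Dequeue path: completed frames are pulled from the queue pair's output FQ
 * using QMan volatile dequeue commands (VDQCR), see dpaa_sec_deq() below.
 */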
/* The qp is lockless; it must be accessed by only one thread. */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, ask for the exact count
	 * with the QM_VDQCR_EXACT flag. Without QM_VDQCR_EXACT, the dequeue
	 * can return up to two more buffers than requested, so in that case
	 * request two fewer.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
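
/*
 * build_*() helpers: each converts one crypto op into a dpaa_sec_job whose
 * sg[0] describes the output and sg[1] the input of a QMan compound frame.
 * The *_sg variants handle scattered (multi-segment) mbufs; the plain
 * variants assume contiguous buffers (see the dispatch in
 * dpaa_sec_enqueue_burst()).
 */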
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}
		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
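
/*
 * AEAD (GCM) jobs: the input chain is IV + optional AAD + payload (plus the
 * received digest when decrypting, so hardware can verify it); the output
 * chain is the payload (plus the computed digest when encrypting).
 */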
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
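
/*
 * Chained cipher+auth jobs mirror the GCM layout, but authenticate and
 * cipher over separately specified data ranges; the auth-only head/tail
 * lengths are supplied per packet via fd->cmd (DPOVRD) at enqueue time.
 */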
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
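
/*
 * Protocol (IPsec/PDCP) offload jobs hand the whole packet to SEC, which
 * emits the fully encapsulated/decapsulated packet; the output entry
 * therefore advertises all the room available in the destination buffer.
 */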
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
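
/*
 * Enqueue path: ops are batched (up to DPAA_SEC_BURST) into frame
 * descriptors and pushed to each session's per-lcore SEC input FQ. When an
 * op carries a held DQRR index (m_src->seqn != 0, set on the event-driven
 * receive path), the held entry is released through QMAN DCA so ordering
 * is preserved.
 */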
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (op->sym->m_src->seqn != 0) {
				index = op->sym->m_src->seqn - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* auth_only_len is set to 0 in the descriptor and is
			 * overwritten here in fd.cmd, which updates the
			 * DPOVRD register.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
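
/*
 * Session setup: the dpaa_sec_*_init() helpers below translate cryptodev
 * xforms into the CAAM algorithm selectors (OP_ALG_ALGSEL_*/OP_ALG_AAI_*)
 * recorded in the session, from which the shared descriptor is later built.
 */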
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		rte_free(session->cipher_key.data);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		rte_free(session->auth_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		goto error_out;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -EINVAL;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		rte_free(session->aead_key.data);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
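
/*
 * A session holds one input FQ per lcore (MAX_DPAA_CORES). The FQs are
 * reserved in dpaa_sec_set_session_parameters() below and bound to the
 * CAAM channel lazily, on the first enqueue from a given lcore, via
 * dpaa_sec_attach_sess_q() above.
 */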
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,	void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		session->ctxt = DPAA_SEC_CIPHER;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->ctxt = DPAA_SEC_CIPHER_HASH;
			session->auth_cipher_text = 1;
			dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->ctxt = DPAA_SEC_CIPHER_HASH;
			session->auth_cipher_text = 0;
			dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
static inline void
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
	uint8_t i;

	for (i = 0; i < MAX_DPAA_CORES; i++) {
		if (s->inq[i])
			dpaa_sec_detach_rxq(qi, s->inq[i]);
		s->inq[i] = NULL;
		s->qp[i] = NULL;
	}
	rte_free(s->cipher_key.data);
	rte_free(s->auth_key.data);
	memset(s, 0, sizeof(dpaa_sec_session));
	rte_mempool_put(sess_mp, (void *)s);
}
/** Clear the session memory so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory(dev, s);
		set_sym_session_private_data(sess, index, NULL);
	}
}
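
/*
 * Security (lookaside IPsec) sessions: besides the cipher/auth keys, the
 * session carries the encap/decap PDB and, for egress, a prebuilt outer IP
 * header that cnstr_shdsc_ipsec_new_encap() embeds in the descriptor.
 */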
2487 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2488 struct rte_security_session_conf *conf,
2491 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2492 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2493 struct rte_crypto_auth_xform *auth_xform = NULL;
2494 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2495 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2498 PMD_INIT_FUNC_TRACE();
2500 memset(session, 0, sizeof(dpaa_sec_session));
2501 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2502 cipher_xform = &conf->crypto_xform->cipher;
2503 if (conf->crypto_xform->next)
2504 auth_xform = &conf->crypto_xform->next->auth;
2506 auth_xform = &conf->crypto_xform->auth;
2507 if (conf->crypto_xform->next)
2508 cipher_xform = &conf->crypto_xform->next->cipher;
2510 session->proto_alg = conf->protocol;
2511 session->ctxt = DPAA_SEC_IPSEC;
2513 if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
2514 session->cipher_key.data = rte_zmalloc(NULL,
2515 cipher_xform->key.length,
2516 RTE_CACHE_LINE_SIZE);
2517 if (session->cipher_key.data == NULL &&
2518 cipher_xform->key.length > 0) {
2519 DPAA_SEC_ERR("No Memory for cipher key");
2522 memcpy(session->cipher_key.data, cipher_xform->key.data,
2523 cipher_xform->key.length);
2524 session->cipher_key.length = cipher_xform->key.length;
2526 switch (cipher_xform->algo) {
2527 case RTE_CRYPTO_CIPHER_NULL:
2528 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2530 case RTE_CRYPTO_CIPHER_AES_CBC:
2531 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2532 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2534 case RTE_CRYPTO_CIPHER_3DES_CBC:
2535 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2536 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2538 case RTE_CRYPTO_CIPHER_AES_CTR:
2539 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2540 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2543 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2544 cipher_xform->algo);
2547 session->cipher_alg = cipher_xform->algo;
2549 session->cipher_key.data = NULL;
2550 session->cipher_key.length = 0;
2551 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_key.length = auth_xform->key.length;

		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
			session->digest_length = 0;
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
			session->auth_key.algmode = OP_ALG_AAI_HMAC;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			goto out;
		}
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			memset(&session->encap_pdb, 0,
				sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip6_hdr));
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		session->dir = DIR_DEC;
	} else
		goto out;

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

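/*
 * Illustrative caller-side sketch (not part of this driver): applications
 * reach dpaa_sec_set_ipsec_session() indirectly, through the generic
 * rte_security API. The names sec_ctx, sess_mp, cipher_xform and the field
 * values below are hypothetical placeholders; cipher_xform is assumed to be
 * chained to an auth xform via its next pointer.
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(cdev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1000,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 * The session is then attached to each crypto op with
 * rte_security_attach_session() before enqueue, which is what makes
 * sess_type read RTE_CRYPTO_OP_SECURITY_SESSION in the dequeue callbacks
 * further below.
 */
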
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}

		session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
			auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be NULL for an auth-only session */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

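/*
 * Illustrative PDCP counterpart (again a sketch, not part of the driver;
 * all field values are hypothetical). Relative to the IPsec example above,
 * only conf.protocol and the protocol-specific xform change:
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 1,
 *			.hfn_threshold = 0x70c0a,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 *
 * Note the control-plane restriction enforced above: in cmode the sequence
 * number size must be 5 or 12 bits.
 */
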
static int
dpaa_sec_security_session_create(void *dev,
			struct rte_security_session_conf *conf,
			struct rte_security_session *sess,
			struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}
	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}
	set_sec_session_private_data(sess, sess_private_data);
	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();

	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}

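/*
 * Event-mode dequeue path: when a queue pair is bound to a DPAA eventdev
 * channel (see dpaa_sec_eventq_attach() below), completed SEC jobs are
 * delivered through the two DQRR callbacks that follow, instead of through
 * dpaa_sec_dequeue_burst(). Each callback maps the frame descriptor back to
 * its rte_crypto_op and builds an rte_event from the template stored in the
 * out-FQ at attach time.
 */
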
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}

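/*
 * The atomic variant below differs from the parallel one in how the DQRR
 * entry is retired: it returns qman_cb_dqrr_defer and records the held entry
 * in the per-lcore DQRR bookkeeping (DPAA_PER_LCORE_DQRR_*), so the hardware
 * keeps the flow "held active" until the application releases the op. This
 * is what provides the RTE_SCHED_TYPE_ATOMIC ordering guarantee.
 */
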
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}

int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}

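/*
 * dpaa_sec_eventq_attach()/dpaa_sec_eventq_detach() are not meant to be
 * called by applications directly; the DPAA eventdev PMD invokes them when
 * a crypto queue pair is added to or removed from an event crypto adapter
 * (e.g. via rte_event_crypto_adapter_queue_pair_add()). The rte_event passed
 * in becomes the template copied to qp->outq.ev above.
 */
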
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
		int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;
	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);
	rte_free(internals);
	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());
	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());
		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era", NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;
	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;
	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
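
/*
 * The "pmd.crypto.dpaa" logtype registered above defaults to NOTICE. More
 * verbose driver logs can be requested at startup through the EAL
 * --log-level option, e.g. --log-level=pmd.crypto.dpaa,8 to enable DEBUG
 * output (the exact separator syntax may vary across DPDK releases).
 */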