1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIBRTE_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
23 #include <rte_kvargs.h>
24 #include <rte_malloc.h>
26 #include <rte_memcpy.h>
27 #include <rte_string_fns.h>
28 #include <rte_spinlock.h>
34 /* RTA header files */
35 #include <desc/common.h>
36 #include <desc/algo.h>
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
40 #include <rte_dpaa_bus.h>
42 #include <dpaa_sec_event.h>
43 #include <dpaa_sec_log.h>
44 #include <dpaax_iova_table.h>
46 static uint8_t cryptodev_driver_id;
48 static __thread struct rte_crypto_op **dpaa_sec_ops;
49 static __thread int dpaa_sec_op_nb;
52 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
55 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
57 if (!ctx->fd_status) {
58 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
60 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
61 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
65 static inline struct dpaa_sec_op_ctx *
66 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
68 struct dpaa_sec_op_ctx *ctx;
71 retval = rte_mempool_get(
72 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
75 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
79 * Clear SG memory. There are 16 SG entries of 16 bytes each.
80 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
81 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
82 * for each packet, memset() would be costlier than dcbz_64().
84 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
85 dcbz_64(&ctx->job.sg[i]);
87 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
88 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
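/*
 * Note: vtop_offset caches the virtual-to-IOVA delta of this ctx so the
 * data path can translate pointers inside the ctx by simple subtraction
 * instead of a per-packet address-table lookup (intent inferred from the
 * arithmetic above).
 */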
94 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
96 const struct qm_mr_entry *msg)
98 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
99 fq->fqid, msg->ern.rc, msg->ern.seqnum);
102 /* Initialize the queue with the CAAM channel as destination so that
103 * all packets enqueued on this queue are dispatched to CAAM
106 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
109 struct qm_mcc_initfq fq_opts;
113 /* Clear FQ options */
114 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
116 flags = QMAN_INITFQ_FLAG_SCHED;
117 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
118 QM_INITFQ_WE_CONTEXTB;
120 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
121 fq_opts.fqd.context_b = fqid_out;
122 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
123 fq_opts.fqd.dest.wq = 0;
125 fq_in->cb.ern = ern_sec_fq_handler;
127 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
129 ret = qman_init_fq(fq_in, flags, &fq_opts);
130 if (unlikely(ret != 0))
131 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
136 /* frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
137 static enum qman_cb_dqrr_result
138 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
139 struct qman_fq *fq __always_unused,
140 const struct qm_dqrr_entry *dqrr)
142 const struct qm_fd *fd;
143 struct dpaa_sec_job *job;
144 struct dpaa_sec_op_ctx *ctx;
146 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
147 return qman_cb_dqrr_defer;
149 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
150 return qman_cb_dqrr_consume;
153 /* sg is embedded in an op ctx;
154 * sg[0] is for output, sg[1] for input
157 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
159 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
160 ctx->fd_status = fd->status;
161 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
162 struct qm_sg_entry *sg_out;
164 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
165 ctx->op->sym->m_src : ctx->op->sym->m_dst;
167 sg_out = &job->sg[0];
168 hw_sg_to_cpu(sg_out);
169 len = sg_out->length;
171 while (mbuf->next != NULL) {
172 len -= mbuf->data_len;
175 mbuf->data_len = len;
177 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
178 dpaa_sec_op_ending(ctx);
180 return qman_cb_dqrr_consume;
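/*
 * Note: results are staged in the per-thread dpaa_sec_ops[] array; once
 * DPAA_SEC_BURST entries are collected, the callback defers further DQRR
 * entries (qman_cb_dqrr_defer) until the batch has been drained.
 */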
183 /* the CAAM result is put into this queue */
185 dpaa_sec_init_tx(struct qman_fq *fq)
188 struct qm_mcc_initfq opts;
191 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
192 QMAN_FQ_FLAG_DYNAMIC_FQID;
194 ret = qman_create_fq(0, flags, fq);
196 DPAA_SEC_ERR("qman_create_fq failed");
200 memset(&opts, 0, sizeof(opts));
201 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
202 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
204 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
206 fq->cb.dqrr = dqrr_out_fq_cb_rx;
207 fq->cb.ern = ern_sec_fq_handler;
209 ret = qman_init_fq(fq, 0, &opts);
211 DPAA_SEC_ERR("unable to init caam source fq!");
218 static inline int is_aead(dpaa_sec_session *ses)
220 return ((ses->cipher_alg == 0) &&
221 (ses->auth_alg == 0) &&
222 (ses->aead_alg != 0));
225 static inline int is_encode(dpaa_sec_session *ses)
227 return ses->dir == DIR_ENC;
230 static inline int is_decode(dpaa_sec_session *ses)
232 return ses->dir == DIR_DEC;
235 #ifdef RTE_LIBRTE_SECURITY
237 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
239 struct alginfo authdata = {0}, cipherdata = {0};
240 struct sec_cdb *cdb = &ses->cdb;
241 struct alginfo *p_authdata = NULL;
242 int32_t shared_desc_len = 0;
243 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
249 cipherdata.key = (size_t)ses->cipher_key.data;
250 cipherdata.keylen = ses->cipher_key.length;
251 cipherdata.key_enc_flags = 0;
252 cipherdata.key_type = RTA_DATA_IMM;
253 cipherdata.algtype = ses->cipher_key.alg;
254 cipherdata.algmode = ses->cipher_key.algmode;
257 authdata.key = (size_t)ses->auth_key.data;
258 authdata.keylen = ses->auth_key.length;
259 authdata.key_enc_flags = 0;
260 authdata.key_type = RTA_DATA_IMM;
261 authdata.algtype = ses->auth_key.alg;
262 authdata.algmode = ses->auth_key.algmode;
264 p_authdata = &authdata;
267 if (rta_inline_pdcp_query(authdata.algtype,
270 ses->pdcp.hfn_ovd)) {
272 (size_t)rte_dpaa_mem_vtop((void *)
273 (size_t)cipherdata.key);
274 cipherdata.key_type = RTA_DATA_PTR;
277 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
278 if (ses->dir == DIR_ENC)
279 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
280 cdb->sh_desc, 1, swap,
285 ses->pdcp.hfn_threshold,
286 &cipherdata, &authdata,
288 else if (ses->dir == DIR_DEC)
289 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
290 cdb->sh_desc, 1, swap,
295 ses->pdcp.hfn_threshold,
296 &cipherdata, &authdata,
299 if (ses->dir == DIR_ENC)
300 shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
301 cdb->sh_desc, 1, swap,
306 ses->pdcp.hfn_threshold,
307 &cipherdata, p_authdata, 0);
308 else if (ses->dir == DIR_DEC)
309 shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
310 cdb->sh_desc, 1, swap,
315 ses->pdcp.hfn_threshold,
316 &cipherdata, p_authdata, 0);
318 return shared_desc_len;
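/*
 * Note: rta_inline_pdcp_query() (above) decides whether the cipher key can
 * be embedded in the shared descriptor; when it cannot, the key is passed
 * by IOVA pointer (RTA_DATA_PTR) instead of as immediate data.
 */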
321 /* prepare ipsec proto command block of the session */
323 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
325 struct alginfo cipherdata = {0}, authdata = {0};
326 struct sec_cdb *cdb = &ses->cdb;
327 int32_t shared_desc_len = 0;
329 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
335 cipherdata.key = (size_t)ses->cipher_key.data;
336 cipherdata.keylen = ses->cipher_key.length;
337 cipherdata.key_enc_flags = 0;
338 cipherdata.key_type = RTA_DATA_IMM;
339 cipherdata.algtype = ses->cipher_key.alg;
340 cipherdata.algmode = ses->cipher_key.algmode;
342 if (ses->auth_key.length) {
343 authdata.key = (size_t)ses->auth_key.data;
344 authdata.keylen = ses->auth_key.length;
345 authdata.key_enc_flags = 0;
346 authdata.key_type = RTA_DATA_IMM;
347 authdata.algtype = ses->auth_key.alg;
348 authdata.algmode = ses->auth_key.algmode;
351 cdb->sh_desc[0] = cipherdata.keylen;
352 cdb->sh_desc[1] = authdata.keylen;
353 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
355 (unsigned int *)cdb->sh_desc,
356 &cdb->sh_desc[2], 2);
359 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
362 if (cdb->sh_desc[2] & 1)
363 cipherdata.key_type = RTA_DATA_IMM;
365 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
366 (void *)(size_t)cipherdata.key);
367 cipherdata.key_type = RTA_DATA_PTR;
369 if (cdb->sh_desc[2] & (1<<1))
370 authdata.key_type = RTA_DATA_IMM;
372 authdata.key = (size_t)rte_dpaa_mem_vtop(
373 (void *)(size_t)authdata.key);
374 authdata.key_type = RTA_DATA_PTR;
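/*
 * Note: rta_inline_query() reports in sh_desc[2] which keys fit inline in
 * the descriptor: bit 0 covers the cipher key and bit 1 the auth key. A
 * set bit keeps the key immediate (RTA_DATA_IMM); a clear bit switches it
 * to an IOVA reference (RTA_DATA_PTR), as handled above.
 */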
380 if (ses->dir == DIR_ENC) {
381 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
383 true, swap, SHR_SERIAL,
385 (uint8_t *)&ses->ip4_hdr,
386 &cipherdata, &authdata);
387 } else if (ses->dir == DIR_DEC) {
388 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
390 true, swap, SHR_SERIAL,
392 &cipherdata, &authdata);
394 return shared_desc_len;
397 /* prepare command block of the session */
399 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
401 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
402 int32_t shared_desc_len = 0;
403 struct sec_cdb *cdb = &ses->cdb;
405 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
411 memset(cdb, 0, sizeof(struct sec_cdb));
414 #ifdef RTE_LIBRTE_SECURITY
416 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
419 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
422 case DPAA_SEC_CIPHER:
423 alginfo_c.key = (size_t)ses->cipher_key.data;
424 alginfo_c.keylen = ses->cipher_key.length;
425 alginfo_c.key_enc_flags = 0;
426 alginfo_c.key_type = RTA_DATA_IMM;
427 alginfo_c.algtype = ses->cipher_key.alg;
428 alginfo_c.algmode = ses->cipher_key.algmode;
430 switch (ses->cipher_alg) {
431 case RTE_CRYPTO_CIPHER_AES_CBC:
432 case RTE_CRYPTO_CIPHER_3DES_CBC:
433 case RTE_CRYPTO_CIPHER_AES_CTR:
434 case RTE_CRYPTO_CIPHER_3DES_CTR:
435 shared_desc_len = cnstr_shdsc_blkcipher(
437 swap, SHR_NEVER, &alginfo_c,
441 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
442 shared_desc_len = cnstr_shdsc_snow_f8(
443 cdb->sh_desc, true, swap,
447 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
448 shared_desc_len = cnstr_shdsc_zuce(
449 cdb->sh_desc, true, swap,
454 DPAA_SEC_ERR("unsupported cipher alg %d",
460 alginfo_a.key = (size_t)ses->auth_key.data;
461 alginfo_a.keylen = ses->auth_key.length;
462 alginfo_a.key_enc_flags = 0;
463 alginfo_a.key_type = RTA_DATA_IMM;
464 alginfo_a.algtype = ses->auth_key.alg;
465 alginfo_a.algmode = ses->auth_key.algmode;
466 switch (ses->auth_alg) {
467 case RTE_CRYPTO_AUTH_MD5_HMAC:
468 case RTE_CRYPTO_AUTH_SHA1_HMAC:
469 case RTE_CRYPTO_AUTH_SHA224_HMAC:
470 case RTE_CRYPTO_AUTH_SHA256_HMAC:
471 case RTE_CRYPTO_AUTH_SHA384_HMAC:
472 case RTE_CRYPTO_AUTH_SHA512_HMAC:
473 shared_desc_len = cnstr_shdsc_hmac(
475 swap, SHR_NEVER, &alginfo_a,
479 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
480 shared_desc_len = cnstr_shdsc_snow_f9(
481 cdb->sh_desc, true, swap,
486 case RTE_CRYPTO_AUTH_ZUC_EIA3:
487 shared_desc_len = cnstr_shdsc_zuca(
488 cdb->sh_desc, true, swap,
494 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
498 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
499 DPAA_SEC_ERR("not supported aead alg");
502 alginfo.key = (size_t)ses->aead_key.data;
503 alginfo.keylen = ses->aead_key.length;
504 alginfo.key_enc_flags = 0;
505 alginfo.key_type = RTA_DATA_IMM;
506 alginfo.algtype = ses->aead_key.alg;
507 alginfo.algmode = ses->aead_key.algmode;
509 if (ses->dir == DIR_ENC)
510 shared_desc_len = cnstr_shdsc_gcm_encap(
511 cdb->sh_desc, true, swap, SHR_NEVER,
516 shared_desc_len = cnstr_shdsc_gcm_decap(
517 cdb->sh_desc, true, swap, SHR_NEVER,
522 case DPAA_SEC_CIPHER_HASH:
523 alginfo_c.key = (size_t)ses->cipher_key.data;
524 alginfo_c.keylen = ses->cipher_key.length;
525 alginfo_c.key_enc_flags = 0;
526 alginfo_c.key_type = RTA_DATA_IMM;
527 alginfo_c.algtype = ses->cipher_key.alg;
528 alginfo_c.algmode = ses->cipher_key.algmode;
530 alginfo_a.key = (size_t)ses->auth_key.data;
531 alginfo_a.keylen = ses->auth_key.length;
532 alginfo_a.key_enc_flags = 0;
533 alginfo_a.key_type = RTA_DATA_IMM;
534 alginfo_a.algtype = ses->auth_key.alg;
535 alginfo_a.algmode = ses->auth_key.algmode;
537 cdb->sh_desc[0] = alginfo_c.keylen;
538 cdb->sh_desc[1] = alginfo_a.keylen;
539 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
541 (unsigned int *)cdb->sh_desc,
542 &cdb->sh_desc[2], 2);
545 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
548 if (cdb->sh_desc[2] & 1)
549 alginfo_c.key_type = RTA_DATA_IMM;
551 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
552 (void *)(size_t)alginfo_c.key);
553 alginfo_c.key_type = RTA_DATA_PTR;
555 if (cdb->sh_desc[2] & (1<<1))
556 alginfo_a.key_type = RTA_DATA_IMM;
558 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
559 (void *)(size_t)alginfo_a.key);
560 alginfo_a.key_type = RTA_DATA_PTR;
565 /* Auth_only_len is set as 0 here and it will be
566 * overwritten in fd for each packet.
568 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
569 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
571 ses->digest_length, ses->dir);
573 case DPAA_SEC_HASH_CIPHER:
575 DPAA_SEC_ERR("error: Unsupported session");
579 if (shared_desc_len < 0) {
580 DPAA_SEC_ERR("error in preparing command block");
581 return shared_desc_len;
584 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
585 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
586 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
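/*
 * Note: the shared-descriptor header words are converted to big-endian
 * here (rte_cpu_to_be_32() above) since the SEC block reads the CDB in
 * big-endian format.
 */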
591 /* qp is lockless, should be accessed by only one thread */
593 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
596 unsigned int pkts = 0;
597 int num_rx_bufs, ret;
598 struct qm_dqrr_entry *dq;
599 uint32_t vdqcr_flags = 0;
603 * For requests of up to four buffers, we provide the exact number of
604 * buffers and set the QM_VDQCR_EXACT flag. Otherwise we do not set it:
605 * without QM_VDQCR_EXACT the portal can deliver up to two more buffers
606 * than requested, so we request two fewer in that case.
609 vdqcr_flags = QM_VDQCR_EXACT;
610 num_rx_bufs = nb_ops;
612 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
613 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
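/*
 * Example (per the sizing comment above): nb_ops = 3 requests exactly 3
 * frames with QM_VDQCR_EXACT; nb_ops = 16 requests 14 without the flag,
 * and the portal may then deliver up to 16 frames.
 */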
615 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
620 const struct qm_fd *fd;
621 struct dpaa_sec_job *job;
622 struct dpaa_sec_op_ctx *ctx;
623 struct rte_crypto_op *op;
625 dq = qman_dequeue(fq);
630 /* sg is embedded in an op ctx;
631 * sg[0] is for output, sg[1] for input
634 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
636 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
637 ctx->fd_status = fd->status;
639 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
640 struct qm_sg_entry *sg_out;
642 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
643 op->sym->m_src : op->sym->m_dst;
645 sg_out = &job->sg[0];
646 hw_sg_to_cpu(sg_out);
647 len = sg_out->length;
649 while (mbuf->next != NULL) {
650 len -= mbuf->data_len;
653 mbuf->data_len = len;
655 if (!ctx->fd_status) {
656 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
658 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
659 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
663 /* report op status to sym->op and then free the ctx memory */
664 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
666 qman_dqrr_consume(fq, dq);
667 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
672 static inline struct dpaa_sec_job *
673 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
675 struct rte_crypto_sym_op *sym = op->sym;
676 struct rte_mbuf *mbuf = sym->m_src;
677 struct dpaa_sec_job *cf;
678 struct dpaa_sec_op_ctx *ctx;
679 struct qm_sg_entry *sg, *out_sg, *in_sg;
680 phys_addr_t start_addr;
681 uint8_t *old_digest, extra_segs;
682 int data_len, data_offset;
684 data_len = sym->auth.data.length;
685 data_offset = sym->auth.data.offset;
687 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
688 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
689 if ((data_len & 7) || (data_offset & 7)) {
690 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
694 data_len = data_len >> 3;
695 data_offset = data_offset >> 3;
703 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
704 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
708 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
714 old_digest = ctx->digest;
718 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
719 out_sg->length = ses->digest_length;
720 cpu_to_hw_sg(out_sg);
724 /* need to extend the input to a compound frame */
725 in_sg->extension = 1;
727 in_sg->length = data_len;
728 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
733 if (ses->iv.length) {
736 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
739 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
740 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
742 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
743 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
746 sg->length = ses->iv.length;
748 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
749 in_sg->length += sg->length;
754 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
755 sg->offset = data_offset;
757 if (data_len <= (mbuf->data_len - data_offset)) {
758 sg->length = data_len;
760 sg->length = mbuf->data_len - data_offset;
762 /* remaining i/p segs */
763 while ((data_len = data_len - sg->length) &&
764 (mbuf = mbuf->next)) {
767 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
768 if (data_len > mbuf->data_len)
769 sg->length = mbuf->data_len;
771 sg->length = data_len;
775 if (is_decode(ses)) {
776 /* Digest verification case */
779 rte_memcpy(old_digest, sym->auth.digest.data,
781 start_addr = rte_dpaa_mem_vtop(old_digest);
782 qm_sg_entry_set64(sg, start_addr);
783 sg->length = ses->digest_length;
784 in_sg->length += ses->digest_length;
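/*
 * Resulting compound frame for auth-only SG (from the code above):
 * sg[0] holds the digest output; sg[1] is an extension pointing at
 * sg[2..]: optional IV, the data segments, and, on decode, the saved
 * digest for in-hardware verification.
 */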
795 * |<----data_len------->|
796 * |ip_header|ah_header|icv|payload|
801 static inline struct dpaa_sec_job *
802 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
804 struct rte_crypto_sym_op *sym = op->sym;
805 struct rte_mbuf *mbuf = sym->m_src;
806 struct dpaa_sec_job *cf;
807 struct dpaa_sec_op_ctx *ctx;
808 struct qm_sg_entry *sg, *in_sg;
809 rte_iova_t start_addr;
811 int data_len, data_offset;
813 data_len = sym->auth.data.length;
814 data_offset = sym->auth.data.offset;
816 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
817 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
818 if ((data_len & 7) || (data_offset & 7)) {
819 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
823 data_len = data_len >> 3;
824 data_offset = data_offset >> 3;
827 ctx = dpaa_sec_alloc_ctx(ses, 4);
833 old_digest = ctx->digest;
835 start_addr = rte_pktmbuf_iova(mbuf);
838 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
839 sg->length = ses->digest_length;
844 /* need to extend the input to a compound frame */
845 in_sg->extension = 1;
847 in_sg->length = data_len;
848 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
851 if (ses->iv.length) {
854 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
857 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
858 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
860 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
861 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
864 sg->length = ses->iv.length;
866 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
867 in_sg->length += sg->length;
872 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
873 sg->offset = data_offset;
874 sg->length = data_len;
876 if (is_decode(ses)) {
877 /* Digest verification case */
879 /* hash result or digest, save digest first */
880 rte_memcpy(old_digest, sym->auth.digest.data,
882 /* let the hardware verify the digest */
883 start_addr = rte_dpaa_mem_vtop(old_digest);
885 qm_sg_entry_set64(sg, start_addr);
886 sg->length = ses->digest_length;
887 in_sg->length += ses->digest_length;
896 static inline struct dpaa_sec_job *
897 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
899 struct rte_crypto_sym_op *sym = op->sym;
900 struct dpaa_sec_job *cf;
901 struct dpaa_sec_op_ctx *ctx;
902 struct qm_sg_entry *sg, *out_sg, *in_sg;
903 struct rte_mbuf *mbuf;
905 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
907 int data_len, data_offset;
909 data_len = sym->cipher.data.length;
910 data_offset = sym->cipher.data.offset;
912 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
913 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
914 if ((data_len & 7) || (data_offset & 7)) {
915 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
919 data_len = data_len >> 3;
920 data_offset = data_offset >> 3;
925 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
928 req_segs = mbuf->nb_segs * 2 + 3;
930 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
931 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
936 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
945 out_sg->extension = 1;
946 out_sg->length = data_len;
947 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
948 cpu_to_hw_sg(out_sg);
952 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
953 sg->length = mbuf->data_len - data_offset;
954 sg->offset = data_offset;
956 /* Successive segs */
961 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
962 sg->length = mbuf->data_len;
971 in_sg->extension = 1;
973 in_sg->length = data_len + ses->iv.length;
976 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
980 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
981 sg->length = ses->iv.length;
986 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
987 sg->length = mbuf->data_len - data_offset;
988 sg->offset = data_offset;
990 /* Successive segs */
995 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
996 sg->length = mbuf->data_len;
1005 static inline struct dpaa_sec_job *
1006 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1008 struct rte_crypto_sym_op *sym = op->sym;
1009 struct dpaa_sec_job *cf;
1010 struct dpaa_sec_op_ctx *ctx;
1011 struct qm_sg_entry *sg;
1012 rte_iova_t src_start_addr, dst_start_addr;
1013 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1015 int data_len, data_offset;
1017 data_len = sym->cipher.data.length;
1018 data_offset = sym->cipher.data.offset;
1020 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1021 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1022 if ((data_len & 7) || (data_offset & 7)) {
1023 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1027 data_len = data_len >> 3;
1028 data_offset = data_offset >> 3;
1031 ctx = dpaa_sec_alloc_ctx(ses, 4);
1038 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1041 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1043 dst_start_addr = src_start_addr;
1047 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1048 sg->length = data_len + ses->iv.length;
1054 /* need to extend the input to a compound frame */
1057 sg->length = data_len + ses->iv.length;
1058 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1062 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1063 sg->length = ses->iv.length;
1067 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1068 sg->length = data_len;
1075 static inline struct dpaa_sec_job *
1076 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1078 struct rte_crypto_sym_op *sym = op->sym;
1079 struct dpaa_sec_job *cf;
1080 struct dpaa_sec_op_ctx *ctx;
1081 struct qm_sg_entry *sg, *out_sg, *in_sg;
1082 struct rte_mbuf *mbuf;
1084 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1089 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1092 req_segs = mbuf->nb_segs * 2 + 4;
1095 if (ses->auth_only_len)
1098 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1099 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1104 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1111 rte_prefetch0(cf->sg);
1114 out_sg = &cf->sg[0];
1115 out_sg->extension = 1;
1117 out_sg->length = sym->aead.data.length + ses->digest_length;
1119 out_sg->length = sym->aead.data.length;
1121 /* output sg entries */
1123 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1124 cpu_to_hw_sg(out_sg);
1127 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1128 sg->length = mbuf->data_len - sym->aead.data.offset;
1129 sg->offset = sym->aead.data.offset;
1131 /* Successive segs */
1136 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1137 sg->length = mbuf->data_len;
1140 sg->length -= ses->digest_length;
1142 if (is_encode(ses)) {
1144 /* set auth output */
1146 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1147 sg->length = ses->digest_length;
1155 in_sg->extension = 1;
1158 in_sg->length = ses->iv.length + sym->aead.data.length
1159 + ses->auth_only_len;
1161 in_sg->length = ses->iv.length + sym->aead.data.length
1162 + ses->auth_only_len + ses->digest_length;
1164 /* input sg entries */
1166 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1167 cpu_to_hw_sg(in_sg);
1170 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1171 sg->length = ses->iv.length;
1174 /* 2nd seg auth only */
1175 if (ses->auth_only_len) {
1177 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1178 sg->length = ses->auth_only_len;
1184 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1185 sg->length = mbuf->data_len - sym->aead.data.offset;
1186 sg->offset = sym->aead.data.offset;
1188 /* Successive segs */
1193 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1194 sg->length = mbuf->data_len;
1198 if (is_decode(ses)) {
1201 memcpy(ctx->digest, sym->aead.digest.data,
1202 ses->digest_length);
1203 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1204 sg->length = ses->digest_length;
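/*
 * Note on AEAD length accounting (from the code above): for encode, the
 * input covers IV + AAD + plaintext and the output adds digest_length for
 * the generated tag; for decode, the received digest is appended to the
 * input instead so the hardware can verify it.
 */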
1212 static inline struct dpaa_sec_job *
1213 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1215 struct rte_crypto_sym_op *sym = op->sym;
1216 struct dpaa_sec_job *cf;
1217 struct dpaa_sec_op_ctx *ctx;
1218 struct qm_sg_entry *sg;
1219 uint32_t length = 0;
1220 rte_iova_t src_start_addr, dst_start_addr;
1221 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1224 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1227 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1229 dst_start_addr = src_start_addr;
1231 ctx = dpaa_sec_alloc_ctx(ses, 7);
1239 rte_prefetch0(cf->sg);
1241 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1242 if (is_encode(ses)) {
1243 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1244 sg->length = ses->iv.length;
1245 length += sg->length;
1249 if (ses->auth_only_len) {
1250 qm_sg_entry_set64(sg,
1251 rte_dpaa_mem_vtop(sym->aead.aad.data));
1252 sg->length = ses->auth_only_len;
1253 length += sg->length;
1257 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1258 sg->length = sym->aead.data.length;
1259 length += sg->length;
1263 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1264 sg->length = ses->iv.length;
1265 length += sg->length;
1269 if (ses->auth_only_len) {
1270 qm_sg_entry_set64(sg,
1271 rte_dpaa_mem_vtop(sym->aead.aad.data));
1272 sg->length = ses->auth_only_len;
1273 length += sg->length;
1277 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1278 sg->length = sym->aead.data.length;
1279 length += sg->length;
1282 memcpy(ctx->digest, sym->aead.digest.data,
1283 ses->digest_length);
1286 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1287 sg->length = ses->digest_length;
1288 length += sg->length;
1292 /* input compound frame */
1293 cf->sg[1].length = length;
1294 cf->sg[1].extension = 1;
1295 cf->sg[1].final = 1;
1296 cpu_to_hw_sg(&cf->sg[1]);
1300 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1301 qm_sg_entry_set64(sg,
1302 dst_start_addr + sym->aead.data.offset);
1303 sg->length = sym->aead.data.length;
1304 length = sg->length;
1305 if (is_encode(ses)) {
1307 /* set auth output */
1309 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1310 sg->length = ses->digest_length;
1311 length += sg->length;
1316 /* output compound frame */
1317 cf->sg[0].length = length;
1318 cf->sg[0].extension = 1;
1319 cpu_to_hw_sg(&cf->sg[0]);
1324 static inline struct dpaa_sec_job *
1325 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1327 struct rte_crypto_sym_op *sym = op->sym;
1328 struct dpaa_sec_job *cf;
1329 struct dpaa_sec_op_ctx *ctx;
1330 struct qm_sg_entry *sg, *out_sg, *in_sg;
1331 struct rte_mbuf *mbuf;
1333 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1338 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1341 req_segs = mbuf->nb_segs * 2 + 4;
1344 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1345 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1350 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1357 rte_prefetch0(cf->sg);
1360 out_sg = &cf->sg[0];
1361 out_sg->extension = 1;
1363 out_sg->length = sym->auth.data.length + ses->digest_length;
1365 out_sg->length = sym->auth.data.length;
1367 /* output sg entries */
1369 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1370 cpu_to_hw_sg(out_sg);
1373 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1374 sg->length = mbuf->data_len - sym->auth.data.offset;
1375 sg->offset = sym->auth.data.offset;
1377 /* Successive segs */
1382 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1383 sg->length = mbuf->data_len;
1386 sg->length -= ses->digest_length;
1388 if (is_encode(ses)) {
1390 /* set auth output */
1392 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1393 sg->length = ses->digest_length;
1401 in_sg->extension = 1;
1404 in_sg->length = ses->iv.length + sym->auth.data.length;
1406 in_sg->length = ses->iv.length + sym->auth.data.length
1407 + ses->digest_length;
1409 /* input sg entries */
1411 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1412 cpu_to_hw_sg(in_sg);
1415 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1416 sg->length = ses->iv.length;
1421 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1422 sg->length = mbuf->data_len - sym->auth.data.offset;
1423 sg->offset = sym->auth.data.offset;
1425 /* Successive segs */
1430 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1431 sg->length = mbuf->data_len;
1435 sg->length -= ses->digest_length;
1436 if (is_decode(ses)) {
1439 memcpy(ctx->digest, sym->auth.digest.data,
1440 ses->digest_length);
1441 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1442 sg->length = ses->digest_length;
1450 static inline struct dpaa_sec_job *
1451 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1453 struct rte_crypto_sym_op *sym = op->sym;
1454 struct dpaa_sec_job *cf;
1455 struct dpaa_sec_op_ctx *ctx;
1456 struct qm_sg_entry *sg;
1457 rte_iova_t src_start_addr, dst_start_addr;
1458 uint32_t length = 0;
1459 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1462 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1464 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1466 dst_start_addr = src_start_addr;
1468 ctx = dpaa_sec_alloc_ctx(ses, 7);
1476 rte_prefetch0(cf->sg);
1478 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1479 if (is_encode(ses)) {
1480 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1481 sg->length = ses->iv.length;
1482 length += sg->length;
1486 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1487 sg->length = sym->auth.data.length;
1488 length += sg->length;
1492 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1493 sg->length = ses->iv.length;
1494 length += sg->length;
1499 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1500 sg->length = sym->auth.data.length;
1501 length += sg->length;
1504 memcpy(ctx->digest, sym->auth.digest.data,
1505 ses->digest_length);
1508 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1509 sg->length = ses->digest_length;
1510 length += sg->length;
1514 /* input compound frame */
1515 cf->sg[1].length = length;
1516 cf->sg[1].extension = 1;
1517 cf->sg[1].final = 1;
1518 cpu_to_hw_sg(&cf->sg[1]);
1522 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1523 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1524 sg->length = sym->cipher.data.length;
1525 length = sg->length;
1526 if (is_encode(ses)) {
1528 /* set auth output */
1530 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1531 sg->length = ses->digest_length;
1532 length += sg->length;
1537 /* output compound frame */
1538 cf->sg[0].length = length;
1539 cf->sg[0].extension = 1;
1540 cpu_to_hw_sg(&cf->sg[0]);
1545 #ifdef RTE_LIBRTE_SECURITY
1546 static inline struct dpaa_sec_job *
1547 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1549 struct rte_crypto_sym_op *sym = op->sym;
1550 struct dpaa_sec_job *cf;
1551 struct dpaa_sec_op_ctx *ctx;
1552 struct qm_sg_entry *sg;
1553 phys_addr_t src_start_addr, dst_start_addr;
1555 ctx = dpaa_sec_alloc_ctx(ses, 2);
1561 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1564 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1566 dst_start_addr = src_start_addr;
1570 qm_sg_entry_set64(sg, src_start_addr);
1571 sg->length = sym->m_src->pkt_len;
1575 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1578 qm_sg_entry_set64(sg, dst_start_addr);
1579 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
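/*
 * Note: the output SG entry spans the whole remaining buffer
 * (buf_len - data_off) rather than pkt_len, presumably so that the
 * protocol-offload path has room for any headers/trailers SEC adds
 * (e.g. IPsec tunnel encapsulation).
 */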
1585 static inline struct dpaa_sec_job *
1586 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1588 struct rte_crypto_sym_op *sym = op->sym;
1589 struct dpaa_sec_job *cf;
1590 struct dpaa_sec_op_ctx *ctx;
1591 struct qm_sg_entry *sg, *out_sg, *in_sg;
1592 struct rte_mbuf *mbuf;
1594 uint32_t in_len = 0, out_len = 0;
1601 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1602 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1603 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1608 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1614 out_sg = &cf->sg[0];
1615 out_sg->extension = 1;
1616 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1620 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1623 /* Successive segs */
1624 while (mbuf->next) {
1625 sg->length = mbuf->data_len;
1626 out_len += sg->length;
1630 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1633 sg->length = mbuf->buf_len - mbuf->data_off;
1634 out_len += sg->length;
1638 out_sg->length = out_len;
1639 cpu_to_hw_sg(out_sg);
1644 in_sg->extension = 1;
1646 in_len = mbuf->data_len;
1649 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1652 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1653 sg->length = mbuf->data_len;
1656 /* Successive segs */
1661 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1662 sg->length = mbuf->data_len;
1664 in_len += sg->length;
1670 in_sg->length = in_len;
1671 cpu_to_hw_sg(in_sg);
1673 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1680 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1683 /* Function to transmit the frames to the given device and queue pair */
1685 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1686 uint16_t num_tx = 0;
1687 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1688 uint32_t frames_to_send;
1689 struct rte_crypto_op *op;
1690 struct dpaa_sec_job *cf;
1691 dpaa_sec_session *ses;
1692 uint16_t auth_hdr_len, auth_tail_len;
1693 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1694 struct qman_fq *inq[DPAA_SEC_BURST];
1697 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1698 DPAA_SEC_BURST : nb_ops;
1699 for (loop = 0; loop < frames_to_send; loop++) {
1701 if (op->sym->m_src->seqn != 0) {
1702 index = op->sym->m_src->seqn - 1;
1703 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1704 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1705 flags[loop] = ((index & 0x0f) << 8);
1706 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
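/*
 * Note: with QMAN_ENQUEUE_FLAG_DCA the held DQRR entry (identified by
 * the index encoded into flags above) is consumed automatically when
 * this enqueue completes, releasing the entry back to the portal.
 */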
1707 DPAA_PER_LCORE_DQRR_SIZE--;
1708 DPAA_PER_LCORE_DQRR_HELD &=
1713 switch (op->sess_type) {
1714 case RTE_CRYPTO_OP_WITH_SESSION:
1715 ses = (dpaa_sec_session *)
1716 get_sym_session_private_data(
1718 cryptodev_driver_id);
1720 #ifdef RTE_LIBRTE_SECURITY
1721 case RTE_CRYPTO_OP_SECURITY_SESSION:
1722 ses = (dpaa_sec_session *)
1723 get_sec_session_private_data(
1724 op->sym->sec_session);
1729 "sessionless crypto op not supported");
1730 frames_to_send = loop;
1736 DPAA_SEC_DP_ERR("session not available");
1737 frames_to_send = loop;
1742 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1743 if (dpaa_sec_attach_sess_q(qp, ses)) {
1744 frames_to_send = loop;
1748 } else if (unlikely(ses->qp[rte_lcore_id() %
1749 MAX_DPAA_CORES] != qp)) {
1750 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1752 ses->qp[rte_lcore_id() %
1753 MAX_DPAA_CORES], qp);
1754 frames_to_send = loop;
1759 auth_hdr_len = op->sym->auth.data.length -
1760 op->sym->cipher.data.length;
1763 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1764 ((op->sym->m_dst == NULL) ||
1765 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1766 switch (ses->ctxt) {
1767 #ifdef RTE_LIBRTE_SECURITY
1769 case DPAA_SEC_IPSEC:
1770 cf = build_proto(op, ses);
1774 cf = build_auth_only(op, ses);
1776 case DPAA_SEC_CIPHER:
1777 cf = build_cipher_only(op, ses);
1780 cf = build_cipher_auth_gcm(op, ses);
1781 auth_hdr_len = ses->auth_only_len;
1783 case DPAA_SEC_CIPHER_HASH:
1785 op->sym->cipher.data.offset
1786 - op->sym->auth.data.offset;
1788 op->sym->auth.data.length
1789 - op->sym->cipher.data.length
1791 cf = build_cipher_auth(op, ses);
1794 DPAA_SEC_DP_ERR("operation not supported");
1795 frames_to_send = loop;
1800 switch (ses->ctxt) {
1801 #ifdef RTE_LIBRTE_SECURITY
1803 case DPAA_SEC_IPSEC:
1804 cf = build_proto_sg(op, ses);
1808 cf = build_auth_only_sg(op, ses);
1810 case DPAA_SEC_CIPHER:
1811 cf = build_cipher_only_sg(op, ses);
1814 cf = build_cipher_auth_gcm_sg(op, ses);
1815 auth_hdr_len = ses->auth_only_len;
1817 case DPAA_SEC_CIPHER_HASH:
1819 op->sym->cipher.data.offset
1820 - op->sym->auth.data.offset;
1822 op->sym->auth.data.length
1823 - op->sym->cipher.data.length
1825 cf = build_cipher_auth_sg(op, ses);
1828 DPAA_SEC_DP_ERR("operation not supported");
1829 frames_to_send = loop;
1834 if (unlikely(!cf)) {
1835 frames_to_send = loop;
1841 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1842 fd->opaque_addr = 0;
1844 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1845 fd->_format1 = qm_fd_compound;
1846 fd->length29 = 2 * sizeof(struct qm_sg_entry);
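/* The FD is a compound frame: it points at a two-entry SG table where
 * sg[0] describes the output buffer and sg[1] the input buffer.
 */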
1848 /* Auth_only_len is set as 0 in the descriptor and it is
1849 * overwritten here in fd.cmd, which updates the DPOVRD register
1852 if (auth_hdr_len || auth_tail_len) {
1853 fd->cmd = 0x80000000;
1855 ((auth_tail_len << 16) | auth_hdr_len);
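/*
 * Example: for a cipher+auth op where authentication also covers bytes
 * before the cipher region, auth_hdr_len carries those leading bytes and
 * auth_tail_len any trailing ones; both are packed into fd->cmd here.
 */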
1858 #ifdef RTE_LIBRTE_SECURITY
1859 /* In case of PDCP, per packet HFN is stored in
1860 * mbuf priv after sym_op.
1862 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1863 fd->cmd = 0x80000000 |
1864 *((uint32_t *)((uint8_t *)op +
1865 ses->pdcp.hfn_ovd_offset));
1866 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1867 *((uint32_t *)((uint8_t *)op +
1868 ses->pdcp.hfn_ovd_offset)),
1875 while (loop < frames_to_send) {
1876 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1877 &flags[loop], frames_to_send - loop);
1879 nb_ops -= frames_to_send;
1880 num_tx += frames_to_send;
1883 dpaa_qp->tx_pkts += num_tx;
1884 dpaa_qp->tx_errs += nb_ops - num_tx;
1890 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1894 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1896 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1898 dpaa_qp->rx_pkts += num_rx;
1899 dpaa_qp->rx_errs += nb_ops - num_rx;
1901 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
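/*
 * Illustrative usage from an application (a sketch, not part of this
 * driver): these bursts are reached through the generic cryptodev API.
 *
 *   struct rte_crypto_op *ops[DPAA_SEC_BURST];
 *   uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *   uint16_t n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops,
 *                                                DPAA_SEC_BURST);
 */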
1906 /** Release queue pair */
1908 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1911 struct dpaa_sec_dev_private *internals;
1912 struct dpaa_sec_qp *qp = NULL;
1914 PMD_INIT_FUNC_TRACE();
1916 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1918 internals = dev->data->dev_private;
1919 if (qp_id >= internals->max_nb_queue_pairs) {
1920 DPAA_SEC_ERR("Max supported qpid %d",
1921 internals->max_nb_queue_pairs);
1925 qp = &internals->qps[qp_id];
1926 rte_mempool_free(qp->ctx_pool);
1927 qp->internals = NULL;
1928 dev->data->queue_pairs[qp_id] = NULL;
1933 /** Setup a queue pair */
1935 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1936 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1937 __rte_unused int socket_id)
1939 struct dpaa_sec_dev_private *internals;
1940 struct dpaa_sec_qp *qp = NULL;
1943 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1945 internals = dev->data->dev_private;
1946 if (qp_id >= internals->max_nb_queue_pairs) {
1947 DPAA_SEC_ERR("Max supported qpid %d",
1948 internals->max_nb_queue_pairs);
1952 qp = &internals->qps[qp_id];
1953 qp->internals = internals;
1954 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1955 dev->data->dev_id, qp_id);
1956 if (!qp->ctx_pool) {
1957 qp->ctx_pool = rte_mempool_create((const char *)str,
1960 CTX_POOL_CACHE_SIZE, 0,
1961 NULL, NULL, NULL, NULL,
1963 if (!qp->ctx_pool) {
1964 DPAA_SEC_ERR("%s create failed\n", str);
1968 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1969 dev->data->dev_id, qp_id);
1970 dev->data->queue_pairs[qp_id] = qp;
1975 /** Returns the size of session structure */
1977 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1979 PMD_INIT_FUNC_TRACE();
1981 return sizeof(dpaa_sec_session);
1985 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1986 struct rte_crypto_sym_xform *xform,
1987 dpaa_sec_session *session)
1989 session->ctxt = DPAA_SEC_CIPHER;
1990 session->cipher_alg = xform->cipher.algo;
1991 session->iv.length = xform->cipher.iv.length;
1992 session->iv.offset = xform->cipher.iv.offset;
1993 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1994 RTE_CACHE_LINE_SIZE);
1995 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1996 DPAA_SEC_ERR("No Memory for cipher key");
1999 session->cipher_key.length = xform->cipher.key.length;
2001 memcpy(session->cipher_key.data, xform->cipher.key.data,
2002 xform->cipher.key.length);
2003 switch (xform->cipher.algo) {
2004 case RTE_CRYPTO_CIPHER_AES_CBC:
2005 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2006 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2008 case RTE_CRYPTO_CIPHER_3DES_CBC:
2009 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2010 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2012 case RTE_CRYPTO_CIPHER_AES_CTR:
2013 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2014 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2016 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2017 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2019 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2020 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2023 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2024 xform->cipher.algo);
2027 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2034 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2035 struct rte_crypto_sym_xform *xform,
2036 dpaa_sec_session *session)
2038 session->ctxt = DPAA_SEC_AUTH;
2039 session->auth_alg = xform->auth.algo;
2040 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2041 RTE_CACHE_LINE_SIZE);
2042 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2043 DPAA_SEC_ERR("No Memory for auth key");
2046 session->auth_key.length = xform->auth.key.length;
2047 session->digest_length = xform->auth.digest_length;
2048 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2049 session->iv.offset = xform->auth.iv.offset;
2050 session->iv.length = xform->auth.iv.length;
2053 memcpy(session->auth_key.data, xform->auth.key.data,
2054 xform->auth.key.length);
2056 switch (xform->auth.algo) {
2057 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2058 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2059 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2061 case RTE_CRYPTO_AUTH_MD5_HMAC:
2062 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2063 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2065 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2066 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2067 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2069 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2070 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2071 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2073 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2074 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2075 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2077 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2078 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2079 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2081 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2082 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2083 session->auth_key.algmode = OP_ALG_AAI_F9;
2085 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2086 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2087 session->auth_key.algmode = OP_ALG_AAI_F9;
2090 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2095 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2102 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2103 struct rte_crypto_sym_xform *xform,
2104 dpaa_sec_session *session)
2107 struct rte_crypto_cipher_xform *cipher_xform;
2108 struct rte_crypto_auth_xform *auth_xform;
2110 session->ctxt = DPAA_SEC_CIPHER_HASH;
2111 if (session->auth_cipher_text) {
2112 cipher_xform = &xform->cipher;
2113 auth_xform = &xform->next->auth;
2115 cipher_xform = &xform->next->cipher;
2116 auth_xform = &xform->auth;
2119 /* Set IV parameters */
2120 session->iv.offset = cipher_xform->iv.offset;
2121 session->iv.length = cipher_xform->iv.length;
2123 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2124 RTE_CACHE_LINE_SIZE);
2125 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2126 DPAA_SEC_ERR("No Memory for cipher key");
2129 session->cipher_key.length = cipher_xform->key.length;
2130 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2131 RTE_CACHE_LINE_SIZE);
2132 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2133 DPAA_SEC_ERR("No Memory for auth key");
2136 session->auth_key.length = auth_xform->key.length;
2137 memcpy(session->cipher_key.data, cipher_xform->key.data,
2138 cipher_xform->key.length);
2139 memcpy(session->auth_key.data, auth_xform->key.data,
2140 auth_xform->key.length);
2142 session->digest_length = auth_xform->digest_length;
2143 session->auth_alg = auth_xform->algo;
2145 switch (auth_xform->algo) {
2146 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2147 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2148 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2150 case RTE_CRYPTO_AUTH_MD5_HMAC:
2151 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2152 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2154 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2155 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2156 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2158 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2159 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2160 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2162 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2163 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2164 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2166 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2167 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2168 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2171 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2176 session->cipher_alg = cipher_xform->algo;
2178 switch (cipher_xform->algo) {
2179 case RTE_CRYPTO_CIPHER_AES_CBC:
2180 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2181 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2183 case RTE_CRYPTO_CIPHER_3DES_CBC:
2184 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2185 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2187 case RTE_CRYPTO_CIPHER_AES_CTR:
2188 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2189 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2192 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2193 cipher_xform->algo);
2196 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2202 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2203 struct rte_crypto_sym_xform *xform,
2204 dpaa_sec_session *session)
2206 session->aead_alg = xform->aead.algo;
2207 session->ctxt = DPAA_SEC_AEAD;
2208 session->iv.length = xform->aead.iv.length;
2209 session->iv.offset = xform->aead.iv.offset;
2210 session->auth_only_len = xform->aead.aad_length;
2211 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2212 RTE_CACHE_LINE_SIZE);
2213 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2214 DPAA_SEC_ERR("No Memory for aead key\n");
2217 session->aead_key.length = xform->aead.key.length;
2218 session->digest_length = xform->aead.digest_length;
2220 memcpy(session->aead_key.data, xform->aead.key.data,
2221 xform->aead.key.length);
2223 switch (session->aead_alg) {
2224 case RTE_CRYPTO_AEAD_AES_GCM:
2225 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2226 session->aead_key.algmode = OP_ALG_AAI_GCM;
2229 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2233 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2239 static struct qman_fq *
2240 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2244 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2245 if (qi->inq_attach[i] == 0) {
2246 qi->inq_attach[i] = 1;
2250 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2256 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2260 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2261 if (&qi->inq[i] == fq) {
2262 if (qman_retire_fq(fq, NULL) != 0)
2263 DPAA_SEC_WARN("Queue is not retired\n");
2265 qi->inq_attach[i] = 0;
2273 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2277 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2278 ret = dpaa_sec_prep_cdb(sess);
2280 DPAA_SEC_ERR("Unable to prepare sec cdb");
2283 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2284 ret = rte_dpaa_portal_init((void *)0);
2286 DPAA_SEC_ERR("Failure in affining portal");
2290 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2291 rte_dpaa_mem_vtop(&sess->cdb),
2292 qman_fq_fqid(&qp->outq));
2294 DPAA_SEC_ERR("Unable to init sec queue");
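/*
 * Note: each lcore gets its own input FQ for the session; its contextA
 * holds the IOVA of the session CDB and contextB routes completions to
 * this qp's out FQ (see dpaa_sec_init_rx() above).
 */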
2300 free_session_data(dpaa_sec_session *s)
2303 rte_free(s->aead_key.data);
2305 rte_free(s->auth_key.data);
2306 rte_free(s->cipher_key.data);
2308 memset(s, 0, sizeof(dpaa_sec_session));
2312 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2313 struct rte_crypto_sym_xform *xform, void *sess)
2315 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2316 dpaa_sec_session *session = sess;
2320 PMD_INIT_FUNC_TRACE();
2322 if (unlikely(sess == NULL)) {
2323 DPAA_SEC_ERR("invalid session struct");
2326 memset(session, 0, sizeof(dpaa_sec_session));
2328 /* Default IV length = 0 */
2329 session->iv.length = 0;
2332 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2333 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2334 ret = dpaa_sec_cipher_init(dev, xform, session);
2336 /* Authentication Only */
2337 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2338 xform->next == NULL) {
2339 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2340 session->ctxt = DPAA_SEC_AUTH;
2341 ret = dpaa_sec_auth_init(dev, xform, session);
2343 /* Cipher then Authenticate */
2344 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2345 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2346 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2347 session->auth_cipher_text = 1;
2348 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2349 ret = dpaa_sec_auth_init(dev, xform, session);
2350 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2351 ret = dpaa_sec_cipher_init(dev, xform, session);
2353 ret = dpaa_sec_chain_init(dev, xform, session);
2355 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2358 /* Authenticate then Cipher */
2359 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2360 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2361 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2362 session->auth_cipher_text = 0;
2363 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2364 ret = dpaa_sec_cipher_init(dev, xform, session);
2365 else if (xform->next->cipher.algo
2366 == RTE_CRYPTO_CIPHER_NULL)
2367 ret = dpaa_sec_auth_init(dev, xform, session);
2369 ret = dpaa_sec_chain_init(dev, xform, session);
2371 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2375 /* AEAD operation for AES-GCM kind of Algorithms */
2376 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2377 xform->next == NULL) {
2378 ret = dpaa_sec_aead_init(dev, xform, session);
2381 DPAA_SEC_ERR("Invalid crypto type");
2385 DPAA_SEC_ERR("unable to init session");
2389 rte_spinlock_lock(&internals->lock);
2390 for (i = 0; i < MAX_DPAA_CORES; i++) {
2391 session->inq[i] = dpaa_sec_attach_rxq(internals);
2392 if (session->inq[i] == NULL) {
2393 DPAA_SEC_ERR("unable to attach sec queue");
2394 rte_spinlock_unlock(&internals->lock);
2399 rte_spinlock_unlock(&internals->lock);
2404 free_session_data(session);
2409 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2410 struct rte_crypto_sym_xform *xform,
2411 struct rte_cryptodev_sym_session *sess,
2412 struct rte_mempool *mempool)
2414 void *sess_private_data;
2417 PMD_INIT_FUNC_TRACE();
2419 if (rte_mempool_get(mempool, &sess_private_data)) {
2420 DPAA_SEC_ERR("Couldn't get object from session mempool");
2424 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2426 DPAA_SEC_ERR("failed to configure session parameters");
2428 /* Return session to mempool */
2429 rte_mempool_put(mempool, sess_private_data);
2433 set_sym_session_private_data(sess, dev->driver_id,
2441 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2443 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2444 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2447 for (i = 0; i < MAX_DPAA_CORES; i++) {
2449 dpaa_sec_detach_rxq(qi, s->inq[i]);
2453 free_session_data(s);
2454 rte_mempool_put(sess_mp, (void *)s);
2457 /** Clear the memory of session so it doesn't leave key material behind */
2459 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2460 struct rte_cryptodev_sym_session *sess)
2462 PMD_INIT_FUNC_TRACE();
2463 uint8_t index = dev->driver_id;
2464 void *sess_priv = get_sym_session_private_data(sess, index);
2465 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2468 free_session_memory(dev, s);
2469 set_sym_session_private_data(sess, index, NULL);
2473 #ifdef RTE_LIBRTE_SECURITY
2475 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2476 struct rte_security_ipsec_xform *ipsec_xform,
2477 dpaa_sec_session *session)
2479 PMD_INIT_FUNC_TRACE();
2481 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2482 RTE_CACHE_LINE_SIZE);
2483 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2484 DPAA_SEC_ERR("No Memory for aead key");
2487 memcpy(session->aead_key.data, aead_xform->key.data,
2488 aead_xform->key.length);
2490 session->digest_length = aead_xform->digest_length;
2491 session->aead_key.length = aead_xform->key.length;
2493 switch (aead_xform->algo) {
2494 case RTE_CRYPTO_AEAD_AES_GCM:
2495 switch (session->digest_length) {
2497 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2500 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2503 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2506 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2507 session->digest_length);
2510 if (session->dir == DIR_ENC) {
2511 memcpy(session->encap_pdb.gcm.salt,
2512 (uint8_t *)&(ipsec_xform->salt), 4);
2514 memcpy(session->decap_pdb.gcm.salt,
2515 (uint8_t *)&(ipsec_xform->salt), 4);
2517 session->aead_key.algmode = OP_ALG_AAI_GCM;
2518 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2521 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2529 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2530 struct rte_crypto_auth_xform *auth_xform,
2531 struct rte_security_ipsec_xform *ipsec_xform,
2532 dpaa_sec_session *session)
2535 session->cipher_key.data = rte_zmalloc(NULL,
2536 cipher_xform->key.length,
2537 RTE_CACHE_LINE_SIZE);
2538 if (session->cipher_key.data == NULL &&
2539 cipher_xform->key.length > 0) {
2540 DPAA_SEC_ERR("No Memory for cipher key");
2544 session->cipher_key.length = cipher_xform->key.length;
2545 memcpy(session->cipher_key.data, cipher_xform->key.data,
2546 cipher_xform->key.length);
2547 session->cipher_alg = cipher_xform->algo;
2549 session->cipher_key.data = NULL;
2550 session->cipher_key.length = 0;
2551 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2555 session->auth_key.data = rte_zmalloc(NULL,
2556 auth_xform->key.length,
2557 RTE_CACHE_LINE_SIZE);
2558 if (session->auth_key.data == NULL &&
2559 auth_xform->key.length > 0) {
2560 DPAA_SEC_ERR("No Memory for auth key");
2563 session->auth_key.length = auth_xform->key.length;
2564 memcpy(session->auth_key.data, auth_xform->key.data,
2565 auth_xform->key.length);
2566 session->auth_alg = auth_xform->algo;
2567 session->digest_length = auth_xform->digest_length;
2569 session->auth_key.data = NULL;
2570 session->auth_key.length = 0;
2571 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2574 switch (session->auth_alg) {
2575 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2576 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2577 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2579 case RTE_CRYPTO_AUTH_MD5_HMAC:
2580 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2581 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2583 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2584 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2585 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2586 if (session->digest_length != 16)
2588 "+++Using sha256-hmac truncated len is non-standard,"
2589 "it will not work with lookaside proto");
2591 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2592 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2593 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2595 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2596 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2597 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2599 case RTE_CRYPTO_AUTH_AES_CMAC:
2600 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2602 case RTE_CRYPTO_AUTH_NULL:
2603 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2605 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2606 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2607 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2608 case RTE_CRYPTO_AUTH_SHA1:
2609 case RTE_CRYPTO_AUTH_SHA256:
2610 case RTE_CRYPTO_AUTH_SHA512:
2611 case RTE_CRYPTO_AUTH_SHA224:
2612 case RTE_CRYPTO_AUTH_SHA384:
2613 case RTE_CRYPTO_AUTH_MD5:
2614 case RTE_CRYPTO_AUTH_AES_GMAC:
2615 case RTE_CRYPTO_AUTH_KASUMI_F9:
2616 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2617 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2618 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2622 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2627 switch (session->cipher_alg) {
2628 case RTE_CRYPTO_CIPHER_AES_CBC:
2629 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2630 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2632 case RTE_CRYPTO_CIPHER_3DES_CBC:
2633 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2634 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2636 case RTE_CRYPTO_CIPHER_AES_CTR:
2637 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2638 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2639 if (session->dir == DIR_ENC) {
2640 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2641 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2643 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2644 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2647 case RTE_CRYPTO_CIPHER_NULL:
2648 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2650 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2651 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2652 case RTE_CRYPTO_CIPHER_3DES_ECB:
2653 case RTE_CRYPTO_CIPHER_AES_ECB:
2654 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2655 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2656 session->cipher_alg);
2659 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2660 session->cipher_alg);
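/*
 * Example (illustrative only): a cipher+auth transform chain that
 * dpaa_sec_ipsec_proto_init() maps to OP_PCL_IPSEC_AES_CBC plus
 * OP_PCL_IPSEC_HMAC_SHA1_96. Key buffers and the IV offset are
 * placeholders chosen for the sketch.
 *
 *	static uint8_t aes_key[16], sha1_key[20];
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = sha1_key, .length = sizeof(sha1_key) },
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = sizeof(aes_key) },
 *			.iv = { .offset = 16, .length = 16 },
 *		},
 *	};
 */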
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;
			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
						PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}
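/*
 * Anti-replay sizing sketch: the requested window is rounded up to a
 * power of two before the PDB option is chosen, e.g. (values assumed):
 *	replay_win_sz = 20  -> rte_align32pow2() ->  32 -> PDBOPTS_ESP_ARS32
 *	replay_win_sz = 64  ->                       64 -> PDBOPTS_ESP_ARS64
 *	replay_win_sz = 100 ->                      128 -> PDBOPTS_ESP_ARS128
 */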
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     session->cipher_alg);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be absent for an auth-only session */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
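/*
 * Example (illustrative only): a PDCP data-plane session configuration
 * that would reach dpaa_sec_set_pdcp_session(). All field values are
 * assumptions chosen for the sketch; "cipher" stands for an application
 * transform such as a SNOW3G_UEA2 xform.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x16,
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = 0,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0x70C0A,
 *		},
 *		.crypto_xform = &cipher,
 *	};
 */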
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* put the object back before bailing out */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
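/*
 * Typical call path (sketch): the application never calls the function
 * above directly; it goes through the rte_security API, e.g.
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_pool);
 *
 * which dispatches here via dpaa_sec_security_ops.session_create.
 */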
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
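/* Atomic vs parallel delivery: the parallel handler releases its DQRR
 * entry immediately (qman_cb_dqrr_consume), while the atomic handler
 * returns qman_cb_dqrr_defer and parks the entry. Each held DQRR slot
 * (entries are 64 bytes apart, hence the >> 6 above) is recorded in
 * per-lcore state and consumed only once the application is done with
 * the mbuf, preserving atomic-queue ordering.
 */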
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};

	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
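/* dpaa_sec_eventq_attach()/detach() are not cryptodev ops; they are
 * exported via dpaa_sec_event.h for the DPAA event device, which binds
 * a SEC out-queue to an eventdev channel when crypto completions are to
 * be delivered as events.
 */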
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions*/
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
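/* Probe-time SEC era detection below reads the "fsl,sec-era" property
 * from the device tree so that the RTA descriptor library generates
 * descriptors matching the CAAM hardware revision; if the era is
 * already configured, the lookup is skipped.
 */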
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);