1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2019 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
42 #include <rte_dpaa_bus.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
48 static uint8_t cryptodev_driver_id;
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
67 struct dpaa_sec_op_ctx *ctx;
70 retval = rte_mempool_get(
71 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
74 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
78 * Clear SG memory. There are 16 SG entries of 16 bytes each.
79 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
80 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
81 * for each packet, memset() would be costlier than dcbz_64().
83 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84 dcbz_64(&ctx->job.sg[i]);
86 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
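/*
 * A note on the cached offset (a sketch, assuming the ctx lives in one
 * contiguous mempool element): vtop_offset is the delta between the ctx
 * virtual address and its IOVA, so any address inside the ctx can be
 * converted on the hot path with a single subtraction, e.g.:
 *
 *   rte_iova_t sg_iova = (size_t)&ctx->job.sg[0] - ctx->vtop_offset;
 */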
93 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
95 const struct qm_mr_entry *msg)
97 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
98 fq->fqid, msg->ern.rc, msg->ern.seqnum);
101 /* initialize the queue with the CAAM channel as destination, so that
102 * all packets enqueued on this queue are dispatched to CAAM
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
108 struct qm_mcc_initfq fq_opts;
112 /* Clear FQ options */
113 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
115 flags = QMAN_INITFQ_FLAG_SCHED;
116 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117 QM_INITFQ_WE_CONTEXTB;
119 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120 fq_opts.fqd.context_b = fqid_out;
121 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122 fq_opts.fqd.dest.wq = 0;
124 fq_in->cb.ern = ern_sec_fq_handler;
126 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
128 ret = qman_init_fq(fq_in, flags, &fq_opts);
129 if (unlikely(ret != 0))
130 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
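/*
 * At this point the Rx FQ is fully wired: context_a holds the IOVA of the
 * shared descriptor (hwdesc), context_b holds the FQID on which CAAM must
 * enqueue results, and the destination channel is the CAAM hardware
 * channel. Any frame enqueued on fq_in is therefore processed with that
 * descriptor and its output appears on fqid_out.
 */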
135 /* frames enqueued on in_fq are processed by CAAM, which puts the crypto result on out_fq */
136 static enum qman_cb_dqrr_result
137 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
138 struct qman_fq *fq __always_unused,
139 const struct qm_dqrr_entry *dqrr)
141 const struct qm_fd *fd;
142 struct dpaa_sec_job *job;
143 struct dpaa_sec_op_ctx *ctx;
145 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
146 return qman_cb_dqrr_defer;
148 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
149 return qman_cb_dqrr_consume;
152 /* sg is embedded in an op ctx,
153 * sg[0] is for output, sg[1] is for input
156 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
158 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
159 ctx->fd_status = fd->status;
160 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
161 struct qm_sg_entry *sg_out;
163 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
164 ctx->op->sym->m_src : ctx->op->sym->m_dst;
166 sg_out = &job->sg[0];
167 hw_sg_to_cpu(sg_out);
168 len = sg_out->length;
170 while (mbuf->next != NULL) {
171 len -= mbuf->data_len;
174 mbuf->data_len = len;
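/*
 * For protocol (lookaside) offload the hardware reports only the total
 * output length in sg_out->length; the loop above subtracts the lengths of
 * all but the last segment so the remainder can be stored as the tail
 * segment's data_len (pkt_len is assumed to be fixed up in elided code).
 */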
176 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
177 dpaa_sec_op_ending(ctx);
179 return qman_cb_dqrr_consume;
182 /* CAAM puts its results into this queue */
184 dpaa_sec_init_tx(struct qman_fq *fq)
187 struct qm_mcc_initfq opts;
190 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191 QMAN_FQ_FLAG_DYNAMIC_FQID;
193 ret = qman_create_fq(0, flags, fq);
195 DPAA_SEC_ERR("qman_create_fq failed");
199 memset(&opts, 0, sizeof(opts));
200 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
203 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
205 fq->cb.dqrr = dqrr_out_fq_cb_rx;
206 fq->cb.ern = ern_sec_fq_handler;
208 ret = qman_init_fq(fq, 0, &opts);
210 DPAA_SEC_ERR("unable to init caam source fq!");
217 static inline int is_aead(dpaa_sec_session *ses)
219 return ((ses->cipher_alg == 0) &&
220 (ses->auth_alg == 0) &&
221 (ses->aead_alg != 0));
224 static inline int is_encode(dpaa_sec_session *ses)
226 return ses->dir == DIR_ENC;
229 static inline int is_decode(dpaa_sec_session *ses)
231 return ses->dir == DIR_DEC;
234 #ifdef RTE_LIB_SECURITY
236 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
238 struct alginfo authdata = {0}, cipherdata = {0};
239 struct sec_cdb *cdb = &ses->cdb;
240 struct alginfo *p_authdata = NULL;
241 int32_t shared_desc_len = 0;
242 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
248 cipherdata.key = (size_t)ses->cipher_key.data;
249 cipherdata.keylen = ses->cipher_key.length;
250 cipherdata.key_enc_flags = 0;
251 cipherdata.key_type = RTA_DATA_IMM;
252 cipherdata.algtype = ses->cipher_key.alg;
253 cipherdata.algmode = ses->cipher_key.algmode;
256 authdata.key = (size_t)ses->auth_key.data;
257 authdata.keylen = ses->auth_key.length;
258 authdata.key_enc_flags = 0;
259 authdata.key_type = RTA_DATA_IMM;
260 authdata.algtype = ses->auth_key.alg;
261 authdata.algmode = ses->auth_key.algmode;
263 p_authdata = &authdata;
266 if (rta_inline_pdcp_query(authdata.algtype,
269 ses->pdcp.hfn_ovd)) {
271 (size_t)rte_dpaa_mem_vtop((void *)
272 (size_t)cipherdata.key);
273 cipherdata.key_type = RTA_DATA_PTR;
276 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
277 if (ses->dir == DIR_ENC)
278 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
279 cdb->sh_desc, 1, swap,
284 ses->pdcp.hfn_threshold,
285 &cipherdata, &authdata,
287 else if (ses->dir == DIR_DEC)
288 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
289 cdb->sh_desc, 1, swap,
294 ses->pdcp.hfn_threshold,
295 &cipherdata, &authdata,
298 if (ses->dir == DIR_ENC) {
299 if (ses->pdcp.sdap_enabled)
301 cnstr_shdsc_pdcp_sdap_u_plane_encap(
302 cdb->sh_desc, 1, swap,
307 ses->pdcp.hfn_threshold,
308 &cipherdata, p_authdata, 0);
311 cnstr_shdsc_pdcp_u_plane_encap(
312 cdb->sh_desc, 1, swap,
317 ses->pdcp.hfn_threshold,
318 &cipherdata, p_authdata, 0);
319 } else if (ses->dir == DIR_DEC) {
320 if (ses->pdcp.sdap_enabled)
322 cnstr_shdsc_pdcp_sdap_u_plane_decap(
323 cdb->sh_desc, 1, swap,
328 ses->pdcp.hfn_threshold,
329 &cipherdata, p_authdata, 0);
332 cnstr_shdsc_pdcp_u_plane_decap(
333 cdb->sh_desc, 1, swap,
338 ses->pdcp.hfn_threshold,
339 &cipherdata, p_authdata, 0);
342 return shared_desc_len;
345 /* prepare the IPsec protocol command block for the session */
347 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
349 struct alginfo cipherdata = {0}, authdata = {0};
350 struct sec_cdb *cdb = &ses->cdb;
351 int32_t shared_desc_len = 0;
353 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
359 cipherdata.key = (size_t)ses->cipher_key.data;
360 cipherdata.keylen = ses->cipher_key.length;
361 cipherdata.key_enc_flags = 0;
362 cipherdata.key_type = RTA_DATA_IMM;
363 cipherdata.algtype = ses->cipher_key.alg;
364 cipherdata.algmode = ses->cipher_key.algmode;
366 if (ses->auth_key.length) {
367 authdata.key = (size_t)ses->auth_key.data;
368 authdata.keylen = ses->auth_key.length;
369 authdata.key_enc_flags = 0;
370 authdata.key_type = RTA_DATA_IMM;
371 authdata.algtype = ses->auth_key.alg;
372 authdata.algmode = ses->auth_key.algmode;
375 cdb->sh_desc[0] = cipherdata.keylen;
376 cdb->sh_desc[1] = authdata.keylen;
377 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
379 (unsigned int *)cdb->sh_desc,
380 &cdb->sh_desc[2], 2);
383 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
386 if (cdb->sh_desc[2] & 1)
387 cipherdata.key_type = RTA_DATA_IMM;
389 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
390 (void *)(size_t)cipherdata.key);
391 cipherdata.key_type = RTA_DATA_PTR;
393 if (cdb->sh_desc[2] & (1<<1))
394 authdata.key_type = RTA_DATA_IMM;
396 authdata.key = (size_t)rte_dpaa_mem_vtop(
397 (void *)(size_t)authdata.key);
398 authdata.key_type = RTA_DATA_PTR;
404 if (ses->dir == DIR_ENC) {
405 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
407 true, swap, SHR_SERIAL,
409 (uint8_t *)&ses->ip4_hdr,
410 &cipherdata, &authdata);
411 } else if (ses->dir == DIR_DEC) {
412 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
414 true, swap, SHR_SERIAL,
416 &cipherdata, &authdata);
418 return shared_desc_len;
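/*
 * A note on the inline-key query above (a sketch of the RTA helper
 * convention, as used by this file): sh_desc[0]/sh_desc[1] are borrowed as
 * scratch to pass the cipher/auth key lengths, and rta_inline_query()
 * reports one bit per key in sh_desc[2] -- bit 0 set means the cipher key
 * fits inline in the descriptor (RTA_DATA_IMM), otherwise it must be
 * referenced by IOVA pointer (RTA_DATA_PTR); bit 1 covers the auth key the
 * same way.
 */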
421 /* prepare the command block for the session */
423 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
425 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
426 int32_t shared_desc_len = 0;
427 struct sec_cdb *cdb = &ses->cdb;
429 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
435 memset(cdb, 0, sizeof(struct sec_cdb));
438 #ifdef RTE_LIB_SECURITY
440 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
443 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
446 case DPAA_SEC_CIPHER:
447 alginfo_c.key = (size_t)ses->cipher_key.data;
448 alginfo_c.keylen = ses->cipher_key.length;
449 alginfo_c.key_enc_flags = 0;
450 alginfo_c.key_type = RTA_DATA_IMM;
451 alginfo_c.algtype = ses->cipher_key.alg;
452 alginfo_c.algmode = ses->cipher_key.algmode;
454 switch (ses->cipher_alg) {
455 case RTE_CRYPTO_CIPHER_AES_CBC:
456 case RTE_CRYPTO_CIPHER_3DES_CBC:
457 case RTE_CRYPTO_CIPHER_DES_CBC:
458 case RTE_CRYPTO_CIPHER_AES_CTR:
459 case RTE_CRYPTO_CIPHER_3DES_CTR:
460 shared_desc_len = cnstr_shdsc_blkcipher(
462 swap, SHR_NEVER, &alginfo_c,
466 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
467 shared_desc_len = cnstr_shdsc_snow_f8(
468 cdb->sh_desc, true, swap,
472 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
473 shared_desc_len = cnstr_shdsc_zuce(
474 cdb->sh_desc, true, swap,
479 DPAA_SEC_ERR("unsupported cipher alg %d",
485 alginfo_a.key = (size_t)ses->auth_key.data;
486 alginfo_a.keylen = ses->auth_key.length;
487 alginfo_a.key_enc_flags = 0;
488 alginfo_a.key_type = RTA_DATA_IMM;
489 alginfo_a.algtype = ses->auth_key.alg;
490 alginfo_a.algmode = ses->auth_key.algmode;
491 switch (ses->auth_alg) {
492 case RTE_CRYPTO_AUTH_MD5:
493 case RTE_CRYPTO_AUTH_SHA1:
494 case RTE_CRYPTO_AUTH_SHA224:
495 case RTE_CRYPTO_AUTH_SHA256:
496 case RTE_CRYPTO_AUTH_SHA384:
497 case RTE_CRYPTO_AUTH_SHA512:
498 shared_desc_len = cnstr_shdsc_hash(
500 swap, SHR_NEVER, &alginfo_a,
504 case RTE_CRYPTO_AUTH_MD5_HMAC:
505 case RTE_CRYPTO_AUTH_SHA1_HMAC:
506 case RTE_CRYPTO_AUTH_SHA224_HMAC:
507 case RTE_CRYPTO_AUTH_SHA256_HMAC:
508 case RTE_CRYPTO_AUTH_SHA384_HMAC:
509 case RTE_CRYPTO_AUTH_SHA512_HMAC:
510 shared_desc_len = cnstr_shdsc_hmac(
512 swap, SHR_NEVER, &alginfo_a,
516 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
517 shared_desc_len = cnstr_shdsc_snow_f9(
518 cdb->sh_desc, true, swap,
523 case RTE_CRYPTO_AUTH_ZUC_EIA3:
524 shared_desc_len = cnstr_shdsc_zuca(
525 cdb->sh_desc, true, swap,
531 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
535 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
536 DPAA_SEC_ERR("unsupported aead alg");
539 alginfo.key = (size_t)ses->aead_key.data;
540 alginfo.keylen = ses->aead_key.length;
541 alginfo.key_enc_flags = 0;
542 alginfo.key_type = RTA_DATA_IMM;
543 alginfo.algtype = ses->aead_key.alg;
544 alginfo.algmode = ses->aead_key.algmode;
546 if (ses->dir == DIR_ENC)
547 shared_desc_len = cnstr_shdsc_gcm_encap(
548 cdb->sh_desc, true, swap, SHR_NEVER,
553 shared_desc_len = cnstr_shdsc_gcm_decap(
554 cdb->sh_desc, true, swap, SHR_NEVER,
559 case DPAA_SEC_CIPHER_HASH:
560 alginfo_c.key = (size_t)ses->cipher_key.data;
561 alginfo_c.keylen = ses->cipher_key.length;
562 alginfo_c.key_enc_flags = 0;
563 alginfo_c.key_type = RTA_DATA_IMM;
564 alginfo_c.algtype = ses->cipher_key.alg;
565 alginfo_c.algmode = ses->cipher_key.algmode;
567 alginfo_a.key = (size_t)ses->auth_key.data;
568 alginfo_a.keylen = ses->auth_key.length;
569 alginfo_a.key_enc_flags = 0;
570 alginfo_a.key_type = RTA_DATA_IMM;
571 alginfo_a.algtype = ses->auth_key.alg;
572 alginfo_a.algmode = ses->auth_key.algmode;
574 cdb->sh_desc[0] = alginfo_c.keylen;
575 cdb->sh_desc[1] = alginfo_a.keylen;
576 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
578 (unsigned int *)cdb->sh_desc,
579 &cdb->sh_desc[2], 2);
582 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
585 if (cdb->sh_desc[2] & 1)
586 alginfo_c.key_type = RTA_DATA_IMM;
588 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
589 (void *)(size_t)alginfo_c.key);
590 alginfo_c.key_type = RTA_DATA_PTR;
592 if (cdb->sh_desc[2] & (1<<1))
593 alginfo_a.key_type = RTA_DATA_IMM;
595 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
596 (void *)(size_t)alginfo_a.key);
597 alginfo_a.key_type = RTA_DATA_PTR;
602 /* Auth_only_len is set to 0 here and will be
603 * overwritten in the FD for each packet.
605 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
606 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
608 ses->digest_length, ses->dir);
610 case DPAA_SEC_HASH_CIPHER:
612 DPAA_SEC_ERR("error: Unsupported session");
616 if (shared_desc_len < 0) {
617 DPAA_SEC_ERR("error in preparing command block");
618 return shared_desc_len;
621 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
622 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
623 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
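/*
 * Finalize the shared-descriptor header: idlen carries the descriptor
 * length in words, and both header words are converted to big-endian, the
 * byte order SEC expects when fetching the CDB from memory.
 */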
628 /* qp is lockless; it must be accessed by only one thread at a time */
630 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
633 unsigned int pkts = 0;
634 int num_rx_bufs, ret;
635 struct qm_dqrr_entry *dq;
636 uint32_t vdqcr_flags = 0;
640 * For requests of up to four buffers, we set QM_VDQCR_EXACT and
641 * provide exactly the requested number of buffers.
642 * Without the QM_VDQCR_EXACT flag the dequeue can return up to two
643 * more buffers than requested, so we request two fewer in that case.
646 vdqcr_flags = QM_VDQCR_EXACT;
647 num_rx_bufs = nb_ops;
649 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
650 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
652 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
657 const struct qm_fd *fd;
658 struct dpaa_sec_job *job;
659 struct dpaa_sec_op_ctx *ctx;
660 struct rte_crypto_op *op;
662 dq = qman_dequeue(fq);
667 /* sg is embedded in an op ctx,
668 * sg[0] is for output, sg[1] is for input
671 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
673 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
674 ctx->fd_status = fd->status;
676 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
677 struct qm_sg_entry *sg_out;
679 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
680 op->sym->m_src : op->sym->m_dst;
682 sg_out = &job->sg[0];
683 hw_sg_to_cpu(sg_out);
684 len = sg_out->length;
686 while (mbuf->next != NULL) {
687 len -= mbuf->data_len;
690 mbuf->data_len = len;
692 if (!ctx->fd_status) {
693 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
695 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
696 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
700 /* report op status to sym->op and then free the ctx memory */
701 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
703 qman_dqrr_consume(fq, dq);
704 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
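/*
 * The loop above drains a volatile dequeue (VDQCR) command: QMan keeps
 * QMAN_FQ_STATE_VDQCR set until every frame of the command has been
 * delivered, so spinning until the flag clears guarantees that no dequeued
 * frame is left behind.
 */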
709 static inline struct dpaa_sec_job *
710 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
712 struct rte_crypto_sym_op *sym = op->sym;
713 struct rte_mbuf *mbuf = sym->m_src;
714 struct dpaa_sec_job *cf;
715 struct dpaa_sec_op_ctx *ctx;
716 struct qm_sg_entry *sg, *out_sg, *in_sg;
717 phys_addr_t start_addr;
718 uint8_t *old_digest, extra_segs;
719 int data_len, data_offset;
721 data_len = sym->auth.data.length;
722 data_offset = sym->auth.data.offset;
724 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
725 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
726 if ((data_len & 7) || (data_offset & 7)) {
727 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
731 data_len = data_len >> 3;
732 data_offset = data_offset >> 3;
740 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
741 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
745 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
751 old_digest = ctx->digest;
755 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
756 out_sg->length = ses->digest_length;
757 cpu_to_hw_sg(out_sg);
761 /* need to extend the input to a compound frame */
762 in_sg->extension = 1;
764 in_sg->length = data_len;
765 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
770 if (ses->iv.length) {
773 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
776 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
777 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
779 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
780 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
783 sg->length = ses->iv.length;
785 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
786 in_sg->length += sg->length;
791 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
792 sg->offset = data_offset;
794 if (data_len <= (mbuf->data_len - data_offset)) {
795 sg->length = data_len;
797 sg->length = mbuf->data_len - data_offset;
799 /* remaining input segments */
800 while ((data_len = data_len - sg->length) &&
801 (mbuf = mbuf->next)) {
804 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
805 if (data_len > mbuf->data_len)
806 sg->length = mbuf->data_len;
808 sg->length = data_len;
812 if (is_decode(ses)) {
813 /* Digest verification case */
816 rte_memcpy(old_digest, sym->auth.digest.data,
818 start_addr = rte_dpaa_mem_vtop(old_digest);
819 qm_sg_entry_set64(sg, start_addr);
820 sg->length = ses->digest_length;
821 in_sg->length += ses->digest_length;
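/*
 * Decrypt-side digest check: the received ICV is saved in the ctx
 * (old_digest) and appended as the last input SG entry, so SEC hashes the
 * data and compares against this trailing digest in hardware; a mismatch
 * shows up as a non-zero fd_status.
 */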
832 * |<----data_len------->|
833 * |ip_header|ah_header|icv|payload|
838 static inline struct dpaa_sec_job *
839 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
841 struct rte_crypto_sym_op *sym = op->sym;
842 struct rte_mbuf *mbuf = sym->m_src;
843 struct dpaa_sec_job *cf;
844 struct dpaa_sec_op_ctx *ctx;
845 struct qm_sg_entry *sg, *in_sg;
846 rte_iova_t start_addr;
848 int data_len, data_offset;
850 data_len = sym->auth.data.length;
851 data_offset = sym->auth.data.offset;
853 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
854 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
855 if ((data_len & 7) || (data_offset & 7)) {
856 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
860 data_len = data_len >> 3;
861 data_offset = data_offset >> 3;
864 ctx = dpaa_sec_alloc_ctx(ses, 4);
870 old_digest = ctx->digest;
872 start_addr = rte_pktmbuf_iova(mbuf);
875 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
876 sg->length = ses->digest_length;
881 /* need to extend the input to a compound frame */
882 in_sg->extension = 1;
884 in_sg->length = data_len;
885 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
888 if (ses->iv.length) {
891 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
894 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
895 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
897 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
898 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
901 sg->length = ses->iv.length;
903 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
904 in_sg->length += sg->length;
909 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
910 sg->offset = data_offset;
911 sg->length = data_len;
913 if (is_decode(ses)) {
914 /* Digest verification case */
916 /* hash result or digest: save the received digest first */
917 rte_memcpy(old_digest, sym->auth.digest.data,
919 /* let the hardware verify the digest */
920 start_addr = rte_dpaa_mem_vtop(old_digest);
922 qm_sg_entry_set64(sg, start_addr);
923 sg->length = ses->digest_length;
924 in_sg->length += ses->digest_length;
933 static inline struct dpaa_sec_job *
934 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
936 struct rte_crypto_sym_op *sym = op->sym;
937 struct dpaa_sec_job *cf;
938 struct dpaa_sec_op_ctx *ctx;
939 struct qm_sg_entry *sg, *out_sg, *in_sg;
940 struct rte_mbuf *mbuf;
942 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
944 int data_len, data_offset;
946 data_len = sym->cipher.data.length;
947 data_offset = sym->cipher.data.offset;
949 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
950 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
951 if ((data_len & 7) || (data_offset & 7)) {
952 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
956 data_len = data_len >> 3;
957 data_offset = data_offset >> 3;
962 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
965 req_segs = mbuf->nb_segs * 2 + 3;
967 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
968 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
973 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
982 out_sg->extension = 1;
983 out_sg->length = data_len;
984 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
985 cpu_to_hw_sg(out_sg);
989 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
990 sg->length = mbuf->data_len - data_offset;
991 sg->offset = data_offset;
993 /* Successive segs */
998 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
999 sg->length = mbuf->data_len;
1008 in_sg->extension = 1;
1010 in_sg->length = data_len + ses->iv.length;
1013 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1014 cpu_to_hw_sg(in_sg);
1017 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1018 sg->length = ses->iv.length;
1023 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1024 sg->length = mbuf->data_len - data_offset;
1025 sg->offset = data_offset;
1027 /* Successive segs */
1032 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1033 sg->length = mbuf->data_len;
1042 static inline struct dpaa_sec_job *
1043 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1045 struct rte_crypto_sym_op *sym = op->sym;
1046 struct dpaa_sec_job *cf;
1047 struct dpaa_sec_op_ctx *ctx;
1048 struct qm_sg_entry *sg;
1049 rte_iova_t src_start_addr, dst_start_addr;
1050 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1052 int data_len, data_offset;
1054 data_len = sym->cipher.data.length;
1055 data_offset = sym->cipher.data.offset;
1057 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1058 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1059 if ((data_len & 7) || (data_offset & 7)) {
1060 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1064 data_len = data_len >> 3;
1065 data_offset = data_offset >> 3;
1068 ctx = dpaa_sec_alloc_ctx(ses, 4);
1075 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1078 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1080 dst_start_addr = src_start_addr;
1084 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1085 sg->length = data_len + ses->iv.length;
1091 /* need to extend the input to a compound frame */
1094 sg->length = data_len + ses->iv.length;
1095 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1099 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1100 sg->length = ses->iv.length;
1104 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1105 sg->length = data_len;
1112 static inline struct dpaa_sec_job *
1113 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1115 struct rte_crypto_sym_op *sym = op->sym;
1116 struct dpaa_sec_job *cf;
1117 struct dpaa_sec_op_ctx *ctx;
1118 struct qm_sg_entry *sg, *out_sg, *in_sg;
1119 struct rte_mbuf *mbuf;
1121 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1126 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1129 req_segs = mbuf->nb_segs * 2 + 4;
1132 if (ses->auth_only_len)
1135 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1136 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1141 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1148 rte_prefetch0(cf->sg);
1151 out_sg = &cf->sg[0];
1152 out_sg->extension = 1;
1154 out_sg->length = sym->aead.data.length + ses->digest_length;
1156 out_sg->length = sym->aead.data.length;
1158 /* output sg entries */
1160 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1161 cpu_to_hw_sg(out_sg);
1164 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1165 sg->length = mbuf->data_len - sym->aead.data.offset;
1166 sg->offset = sym->aead.data.offset;
1168 /* Successive segs */
1173 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1174 sg->length = mbuf->data_len;
1177 sg->length -= ses->digest_length;
1179 if (is_encode(ses)) {
1181 /* set auth output */
1183 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1184 sg->length = ses->digest_length;
1192 in_sg->extension = 1;
1195 in_sg->length = ses->iv.length + sym->aead.data.length
1196 + ses->auth_only_len;
1198 in_sg->length = ses->iv.length + sym->aead.data.length
1199 + ses->auth_only_len + ses->digest_length;
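/*
 * Input compound length: IV + (optional) AAD + payload, plus the received
 * ICV when decrypting, since GCM decap verifies the tag as part of the
 * input stream.
 */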
1201 /* input sg entries */
1203 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1204 cpu_to_hw_sg(in_sg);
1207 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1208 sg->length = ses->iv.length;
1211 /* 2nd seg auth only */
1212 if (ses->auth_only_len) {
1214 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1215 sg->length = ses->auth_only_len;
1221 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1222 sg->length = mbuf->data_len - sym->aead.data.offset;
1223 sg->offset = sym->aead.data.offset;
1225 /* Successive segs */
1230 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1231 sg->length = mbuf->data_len;
1235 if (is_decode(ses)) {
1238 memcpy(ctx->digest, sym->aead.digest.data,
1239 ses->digest_length);
1240 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1241 sg->length = ses->digest_length;
1249 static inline struct dpaa_sec_job *
1250 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1252 struct rte_crypto_sym_op *sym = op->sym;
1253 struct dpaa_sec_job *cf;
1254 struct dpaa_sec_op_ctx *ctx;
1255 struct qm_sg_entry *sg;
1256 uint32_t length = 0;
1257 rte_iova_t src_start_addr, dst_start_addr;
1258 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1261 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1264 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1266 dst_start_addr = src_start_addr;
1268 ctx = dpaa_sec_alloc_ctx(ses, 7);
1276 rte_prefetch0(cf->sg);
1278 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1279 if (is_encode(ses)) {
1280 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1281 sg->length = ses->iv.length;
1282 length += sg->length;
1286 if (ses->auth_only_len) {
1287 qm_sg_entry_set64(sg,
1288 rte_dpaa_mem_vtop(sym->aead.aad.data));
1289 sg->length = ses->auth_only_len;
1290 length += sg->length;
1294 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1295 sg->length = sym->aead.data.length;
1296 length += sg->length;
1300 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1301 sg->length = ses->iv.length;
1302 length += sg->length;
1306 if (ses->auth_only_len) {
1307 qm_sg_entry_set64(sg,
1308 rte_dpaa_mem_vtop(sym->aead.aad.data));
1309 sg->length = ses->auth_only_len;
1310 length += sg->length;
1314 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1315 sg->length = sym->aead.data.length;
1316 length += sg->length;
1319 memcpy(ctx->digest, sym->aead.digest.data,
1320 ses->digest_length);
1323 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1324 sg->length = ses->digest_length;
1325 length += sg->length;
1329 /* input compound frame */
1330 cf->sg[1].length = length;
1331 cf->sg[1].extension = 1;
1332 cf->sg[1].final = 1;
1333 cpu_to_hw_sg(&cf->sg[1]);
1337 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1338 qm_sg_entry_set64(sg,
1339 dst_start_addr + sym->aead.data.offset);
1340 sg->length = sym->aead.data.length;
1341 length = sg->length;
1342 if (is_encode(ses)) {
1344 /* set auth output */
1346 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1347 sg->length = ses->digest_length;
1348 length += sg->length;
1353 /* output compound frame */
1354 cf->sg[0].length = length;
1355 cf->sg[0].extension = 1;
1356 cpu_to_hw_sg(&cf->sg[0]);
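/*
 * Summary of the job built above (the compound-frame convention used
 * throughout this file): cf->sg[0] is the output side and cf->sg[1] the
 * input side; both are extension entries pointing at the SG lists laid out
 * in the same ctx, and the input list carries the final bit.
 */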
1361 static inline struct dpaa_sec_job *
1362 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1364 struct rte_crypto_sym_op *sym = op->sym;
1365 struct dpaa_sec_job *cf;
1366 struct dpaa_sec_op_ctx *ctx;
1367 struct qm_sg_entry *sg, *out_sg, *in_sg;
1368 struct rte_mbuf *mbuf;
1370 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1375 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1378 req_segs = mbuf->nb_segs * 2 + 4;
1381 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1382 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1387 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1394 rte_prefetch0(cf->sg);
1397 out_sg = &cf->sg[0];
1398 out_sg->extension = 1;
1400 out_sg->length = sym->auth.data.length + ses->digest_length;
1402 out_sg->length = sym->auth.data.length;
1404 /* output sg entries */
1406 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1407 cpu_to_hw_sg(out_sg);
1410 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1411 sg->length = mbuf->data_len - sym->auth.data.offset;
1412 sg->offset = sym->auth.data.offset;
1414 /* Successive segs */
1419 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1420 sg->length = mbuf->data_len;
1423 sg->length -= ses->digest_length;
1425 if (is_encode(ses)) {
1427 /* set auth output */
1429 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1430 sg->length = ses->digest_length;
1438 in_sg->extension = 1;
1441 in_sg->length = ses->iv.length + sym->auth.data.length;
1443 in_sg->length = ses->iv.length + sym->auth.data.length
1444 + ses->digest_length;
1446 /* input sg entries */
1448 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1449 cpu_to_hw_sg(in_sg);
1452 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1453 sg->length = ses->iv.length;
1458 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1459 sg->length = mbuf->data_len - sym->auth.data.offset;
1460 sg->offset = sym->auth.data.offset;
1462 /* Successive segs */
1467 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1468 sg->length = mbuf->data_len;
1472 sg->length -= ses->digest_length;
1473 if (is_decode(ses)) {
1476 memcpy(ctx->digest, sym->auth.digest.data,
1477 ses->digest_length);
1478 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1479 sg->length = ses->digest_length;
1487 static inline struct dpaa_sec_job *
1488 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1490 struct rte_crypto_sym_op *sym = op->sym;
1491 struct dpaa_sec_job *cf;
1492 struct dpaa_sec_op_ctx *ctx;
1493 struct qm_sg_entry *sg;
1494 rte_iova_t src_start_addr, dst_start_addr;
1495 uint32_t length = 0;
1496 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1499 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1501 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1503 dst_start_addr = src_start_addr;
1505 ctx = dpaa_sec_alloc_ctx(ses, 7);
1513 rte_prefetch0(cf->sg);
1515 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1516 if (is_encode(ses)) {
1517 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1518 sg->length = ses->iv.length;
1519 length += sg->length;
1523 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1524 sg->length = sym->auth.data.length;
1525 length += sg->length;
1529 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1530 sg->length = ses->iv.length;
1531 length += sg->length;
1536 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1537 sg->length = sym->auth.data.length;
1538 length += sg->length;
1541 memcpy(ctx->digest, sym->auth.digest.data,
1542 ses->digest_length);
1545 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1546 sg->length = ses->digest_length;
1547 length += sg->length;
1551 /* input compound frame */
1552 cf->sg[1].length = length;
1553 cf->sg[1].extension = 1;
1554 cf->sg[1].final = 1;
1555 cpu_to_hw_sg(&cf->sg[1]);
1559 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1560 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1561 sg->length = sym->cipher.data.length;
1562 length = sg->length;
1563 if (is_encode(ses)) {
1565 /* set auth output */
1567 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1568 sg->length = ses->digest_length;
1569 length += sg->length;
1574 /* output compound frame */
1575 cf->sg[0].length = length;
1576 cf->sg[0].extension = 1;
1577 cpu_to_hw_sg(&cf->sg[0]);
1582 #ifdef RTE_LIB_SECURITY
1583 static inline struct dpaa_sec_job *
1584 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1586 struct rte_crypto_sym_op *sym = op->sym;
1587 struct dpaa_sec_job *cf;
1588 struct dpaa_sec_op_ctx *ctx;
1589 struct qm_sg_entry *sg;
1590 phys_addr_t src_start_addr, dst_start_addr;
1592 ctx = dpaa_sec_alloc_ctx(ses, 2);
1598 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1601 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1603 dst_start_addr = src_start_addr;
1607 qm_sg_entry_set64(sg, src_start_addr);
1608 sg->length = sym->m_src->pkt_len;
1612 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1615 qm_sg_entry_set64(sg, dst_start_addr);
1616 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
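/*
 * The output length is the full room left in the mbuf (buf_len - data_off)
 * rather than pkt_len: protocol offload may grow the packet (e.g. ESP
 * header, IV, padding and ICV on encap), and SEC reports the actual
 * produced length back through the output SG entry.
 */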
1622 static inline struct dpaa_sec_job *
1623 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1625 struct rte_crypto_sym_op *sym = op->sym;
1626 struct dpaa_sec_job *cf;
1627 struct dpaa_sec_op_ctx *ctx;
1628 struct qm_sg_entry *sg, *out_sg, *in_sg;
1629 struct rte_mbuf *mbuf;
1631 uint32_t in_len = 0, out_len = 0;
1638 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1639 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1640 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1645 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1651 out_sg = &cf->sg[0];
1652 out_sg->extension = 1;
1653 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1657 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1660 /* Successive segs */
1661 while (mbuf->next) {
1662 sg->length = mbuf->data_len;
1663 out_len += sg->length;
1667 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1670 sg->length = mbuf->buf_len - mbuf->data_off;
1671 out_len += sg->length;
1675 out_sg->length = out_len;
1676 cpu_to_hw_sg(out_sg);
1681 in_sg->extension = 1;
1683 in_len = mbuf->data_len;
1686 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1689 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1690 sg->length = mbuf->data_len;
1693 /* Successive segs */
1698 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1699 sg->length = mbuf->data_len;
1701 in_len += sg->length;
1707 in_sg->length = in_len;
1708 cpu_to_hw_sg(in_sg);
1710 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1717 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1720 /* Function to transmit the frames to the given device and queue pair */
1722 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1723 uint16_t num_tx = 0;
1724 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1725 uint32_t frames_to_send;
1726 struct rte_crypto_op *op;
1727 struct dpaa_sec_job *cf;
1728 dpaa_sec_session *ses;
1729 uint16_t auth_hdr_len, auth_tail_len;
1730 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1731 struct qman_fq *inq[DPAA_SEC_BURST];
1733 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1734 if (rte_dpaa_portal_init((void *)0)) {
1735 DPAA_SEC_ERR("Failure in affining portal");
1741 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1742 DPAA_SEC_BURST : nb_ops;
1743 for (loop = 0; loop < frames_to_send; loop++) {
1745 if (*dpaa_seqn(op->sym->m_src) != 0) {
1746 index = *dpaa_seqn(op->sym->m_src) - 1;
1747 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1748 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1749 flags[loop] = ((index & 0x0f) << 8);
1750 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1751 DPAA_PER_LCORE_DQRR_SIZE--;
1752 DPAA_PER_LCORE_DQRR_HELD &=
1757 switch (op->sess_type) {
1758 case RTE_CRYPTO_OP_WITH_SESSION:
1759 ses = (dpaa_sec_session *)
1760 get_sym_session_private_data(
1762 cryptodev_driver_id);
1764 #ifdef RTE_LIB_SECURITY
1765 case RTE_CRYPTO_OP_SECURITY_SESSION:
1766 ses = (dpaa_sec_session *)
1767 get_sec_session_private_data(
1768 op->sym->sec_session);
1773 "sessionless crypto op not supported");
1774 frames_to_send = loop;
1780 DPAA_SEC_DP_ERR("session not available");
1781 frames_to_send = loop;
1786 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1787 if (dpaa_sec_attach_sess_q(qp, ses)) {
1788 frames_to_send = loop;
1792 } else if (unlikely(ses->qp[rte_lcore_id() %
1793 MAX_DPAA_CORES] != qp)) {
1794 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1796 ses->qp[rte_lcore_id() %
1797 MAX_DPAA_CORES], qp);
1798 frames_to_send = loop;
1803 auth_hdr_len = op->sym->auth.data.length -
1804 op->sym->cipher.data.length;
1807 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1808 ((op->sym->m_dst == NULL) ||
1809 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1810 switch (ses->ctxt) {
1811 #ifdef RTE_LIB_SECURITY
1813 case DPAA_SEC_IPSEC:
1814 cf = build_proto(op, ses);
1818 cf = build_auth_only(op, ses);
1820 case DPAA_SEC_CIPHER:
1821 cf = build_cipher_only(op, ses);
1824 cf = build_cipher_auth_gcm(op, ses);
1825 auth_hdr_len = ses->auth_only_len;
1827 case DPAA_SEC_CIPHER_HASH:
1829 op->sym->cipher.data.offset
1830 - op->sym->auth.data.offset;
1832 op->sym->auth.data.length
1833 - op->sym->cipher.data.length
1835 cf = build_cipher_auth(op, ses);
1838 DPAA_SEC_DP_ERR("not supported ops");
1839 frames_to_send = loop;
1844 switch (ses->ctxt) {
1845 #ifdef RTE_LIB_SECURITY
1847 case DPAA_SEC_IPSEC:
1848 cf = build_proto_sg(op, ses);
1852 cf = build_auth_only_sg(op, ses);
1854 case DPAA_SEC_CIPHER:
1855 cf = build_cipher_only_sg(op, ses);
1858 cf = build_cipher_auth_gcm_sg(op, ses);
1859 auth_hdr_len = ses->auth_only_len;
1861 case DPAA_SEC_CIPHER_HASH:
1863 op->sym->cipher.data.offset
1864 - op->sym->auth.data.offset;
1866 op->sym->auth.data.length
1867 - op->sym->cipher.data.length
1869 cf = build_cipher_auth_sg(op, ses);
1872 DPAA_SEC_DP_ERR("not supported ops");
1873 frames_to_send = loop;
1878 if (unlikely(!cf)) {
1879 frames_to_send = loop;
1885 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1886 fd->opaque_addr = 0;
1888 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1889 fd->_format1 = qm_fd_compound;
1890 fd->length29 = 2 * sizeof(struct qm_sg_entry);
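/*
 * The FD describes a compound frame: it points at the two-entry SG table
 * (cf->sg[0] = output, cf->sg[1] = input) built by the build_* helper, and
 * length29 covers exactly those two entries.
 */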
1892 /* Auth_only_len is set to 0 in the descriptor and is
1893 * overwritten here via fd.cmd, which updates the SEC DPOVRD register
1896 if (auth_hdr_len || auth_tail_len) {
1897 fd->cmd = 0x80000000;
1899 ((auth_tail_len << 16) | auth_hdr_len);
1902 #ifdef RTE_LIB_SECURITY
1903 /* In case of PDCP, the per-packet HFN override word is stored in
1904 * the mbuf private area, right after sym_op.
1906 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1907 fd->cmd = 0x80000000 |
1908 *((uint32_t *)((uint8_t *)op +
1909 ses->pdcp.hfn_ovd_offset));
1910 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1911 *((uint32_t *)((uint8_t *)op +
1912 ses->pdcp.hfn_ovd_offset)),
1919 while (loop < frames_to_send) {
1920 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1921 &flags[loop], frames_to_send - loop);
1923 nb_ops -= frames_to_send;
1924 num_tx += frames_to_send;
1927 dpaa_qp->tx_pkts += num_tx;
1928 dpaa_qp->tx_errs += nb_ops - num_tx;
1934 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1938 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1940 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1941 if (rte_dpaa_portal_init((void *)0)) {
1942 DPAA_SEC_ERR("Failure in affining portal");
1947 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1949 dpaa_qp->rx_pkts += num_rx;
1950 dpaa_qp->rx_errs += nb_ops - num_rx;
1952 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1957 /** Release queue pair */
1959 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1962 struct dpaa_sec_dev_private *internals;
1963 struct dpaa_sec_qp *qp = NULL;
1965 PMD_INIT_FUNC_TRACE();
1967 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1969 internals = dev->data->dev_private;
1970 if (qp_id >= internals->max_nb_queue_pairs) {
1971 DPAA_SEC_ERR("Max supported qpid %d",
1972 internals->max_nb_queue_pairs);
1976 qp = &internals->qps[qp_id];
1977 rte_mempool_free(qp->ctx_pool);
1978 qp->internals = NULL;
1979 dev->data->queue_pairs[qp_id] = NULL;
1984 /** Setup a queue pair */
1986 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1987 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1988 __rte_unused int socket_id)
1990 struct dpaa_sec_dev_private *internals;
1991 struct dpaa_sec_qp *qp = NULL;
1994 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1996 internals = dev->data->dev_private;
1997 if (qp_id >= internals->max_nb_queue_pairs) {
1998 DPAA_SEC_ERR("Max supported qpid %d",
1999 internals->max_nb_queue_pairs);
2003 qp = &internals->qps[qp_id];
2004 qp->internals = internals;
2005 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2006 dev->data->dev_id, qp_id);
2007 if (!qp->ctx_pool) {
2008 qp->ctx_pool = rte_mempool_create((const char *)str,
2011 CTX_POOL_CACHE_SIZE, 0,
2012 NULL, NULL, NULL, NULL,
2014 if (!qp->ctx_pool) {
2015 DPAA_SEC_ERR("%s create failed\n", str);
2019 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2020 dev->data->dev_id, qp_id);
2021 dev->data->queue_pairs[qp_id] = qp;
2026 /** Returns the size of session structure */
2028 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2030 PMD_INIT_FUNC_TRACE();
2032 return sizeof(dpaa_sec_session);
2036 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2037 struct rte_crypto_sym_xform *xform,
2038 dpaa_sec_session *session)
2040 session->ctxt = DPAA_SEC_CIPHER;
2041 session->cipher_alg = xform->cipher.algo;
2042 session->iv.length = xform->cipher.iv.length;
2043 session->iv.offset = xform->cipher.iv.offset;
2044 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2045 RTE_CACHE_LINE_SIZE);
2046 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2047 DPAA_SEC_ERR("No Memory for cipher key");
2050 session->cipher_key.length = xform->cipher.key.length;
2052 memcpy(session->cipher_key.data, xform->cipher.key.data,
2053 xform->cipher.key.length);
2054 switch (xform->cipher.algo) {
2055 case RTE_CRYPTO_CIPHER_AES_CBC:
2056 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2057 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2059 case RTE_CRYPTO_CIPHER_DES_CBC:
2060 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2061 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2063 case RTE_CRYPTO_CIPHER_3DES_CBC:
2064 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2065 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2067 case RTE_CRYPTO_CIPHER_AES_CTR:
2068 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2069 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2071 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2072 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2074 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2075 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2078 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2079 xform->cipher.algo);
2082 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2089 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2090 struct rte_crypto_sym_xform *xform,
2091 dpaa_sec_session *session)
2093 session->ctxt = DPAA_SEC_AUTH;
2094 session->auth_alg = xform->auth.algo;
2095 session->auth_key.length = xform->auth.key.length;
2096 if (xform->auth.key.length) {
2097 session->auth_key.data =
2098 rte_zmalloc(NULL, xform->auth.key.length,
2099 RTE_CACHE_LINE_SIZE);
2100 if (session->auth_key.data == NULL) {
2101 DPAA_SEC_ERR("No Memory for auth key");
2104 memcpy(session->auth_key.data, xform->auth.key.data,
2105 xform->auth.key.length);
2108 session->digest_length = xform->auth.digest_length;
2109 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2110 session->iv.offset = xform->auth.iv.offset;
2111 session->iv.length = xform->auth.iv.length;
2114 switch (xform->auth.algo) {
2115 case RTE_CRYPTO_AUTH_SHA1:
2116 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2117 session->auth_key.algmode = OP_ALG_AAI_HASH;
2119 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2120 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2121 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2123 case RTE_CRYPTO_AUTH_MD5:
2124 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2125 session->auth_key.algmode = OP_ALG_AAI_HASH;
2127 case RTE_CRYPTO_AUTH_MD5_HMAC:
2128 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2129 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2131 case RTE_CRYPTO_AUTH_SHA224:
2132 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2133 session->auth_key.algmode = OP_ALG_AAI_HASH;
2135 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2136 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2137 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2139 case RTE_CRYPTO_AUTH_SHA256:
2140 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2141 session->auth_key.algmode = OP_ALG_AAI_HASH;
2143 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2144 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2145 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2147 case RTE_CRYPTO_AUTH_SHA384:
2148 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2149 session->auth_key.algmode = OP_ALG_AAI_HASH;
2151 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2152 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2153 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2155 case RTE_CRYPTO_AUTH_SHA512:
2156 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2157 session->auth_key.algmode = OP_ALG_AAI_HASH;
2159 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2160 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2161 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2163 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2164 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2165 session->auth_key.algmode = OP_ALG_AAI_F9;
2167 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2168 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2169 session->auth_key.algmode = OP_ALG_AAI_F9;
2172 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2177 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2184 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2185 struct rte_crypto_sym_xform *xform,
2186 dpaa_sec_session *session)
2189 struct rte_crypto_cipher_xform *cipher_xform;
2190 struct rte_crypto_auth_xform *auth_xform;
2192 session->ctxt = DPAA_SEC_CIPHER_HASH;
2193 if (session->auth_cipher_text) {
2194 cipher_xform = &xform->cipher;
2195 auth_xform = &xform->next->auth;
2197 cipher_xform = &xform->next->cipher;
2198 auth_xform = &xform->auth;
2201 /* Set IV parameters */
2202 session->iv.offset = cipher_xform->iv.offset;
2203 session->iv.length = cipher_xform->iv.length;
2205 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2206 RTE_CACHE_LINE_SIZE);
2207 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2208 DPAA_SEC_ERR("No Memory for cipher key");
2211 session->cipher_key.length = cipher_xform->key.length;
2212 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2213 RTE_CACHE_LINE_SIZE);
2214 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2215 DPAA_SEC_ERR("No Memory for auth key");
2218 session->auth_key.length = auth_xform->key.length;
2219 memcpy(session->cipher_key.data, cipher_xform->key.data,
2220 cipher_xform->key.length);
2221 memcpy(session->auth_key.data, auth_xform->key.data,
2222 auth_xform->key.length);
2224 session->digest_length = auth_xform->digest_length;
2225 session->auth_alg = auth_xform->algo;
2227 switch (auth_xform->algo) {
2228 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2229 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2230 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2232 case RTE_CRYPTO_AUTH_MD5_HMAC:
2233 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2234 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2236 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2237 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2238 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2240 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2241 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2242 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2244 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2245 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2246 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2248 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2249 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2250 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2253 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2258 session->cipher_alg = cipher_xform->algo;
2260 switch (cipher_xform->algo) {
2261 case RTE_CRYPTO_CIPHER_AES_CBC:
2262 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2263 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2265 case RTE_CRYPTO_CIPHER_DES_CBC:
2266 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2267 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2269 case RTE_CRYPTO_CIPHER_3DES_CBC:
2270 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2271 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2273 case RTE_CRYPTO_CIPHER_AES_CTR:
2274 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2275 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2278 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2279 cipher_xform->algo);
2282 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2288 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2289 struct rte_crypto_sym_xform *xform,
2290 dpaa_sec_session *session)
2292 session->aead_alg = xform->aead.algo;
2293 session->ctxt = DPAA_SEC_AEAD;
2294 session->iv.length = xform->aead.iv.length;
2295 session->iv.offset = xform->aead.iv.offset;
2296 session->auth_only_len = xform->aead.aad_length;
2297 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2298 RTE_CACHE_LINE_SIZE);
2299 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2300 DPAA_SEC_ERR("No Memory for aead key");
2303 session->aead_key.length = xform->aead.key.length;
2304 session->digest_length = xform->aead.digest_length;
2306 memcpy(session->aead_key.data, xform->aead.key.data,
2307 xform->aead.key.length);
2309 switch (session->aead_alg) {
2310 case RTE_CRYPTO_AEAD_AES_GCM:
2311 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2312 session->aead_key.algmode = OP_ALG_AAI_GCM;
2315 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2319 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2325 static struct qman_fq *
2326 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2330 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2331 if (qi->inq_attach[i] == 0) {
2332 qi->inq_attach[i] = 1;
2336 DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
2342 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2346 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2347 if (&qi->inq[i] == fq) {
2348 if (qman_retire_fq(fq, NULL) != 0)
2349 DPAA_SEC_DEBUG("Queue is not retired\n");
2351 qi->inq_attach[i] = 0;
2359 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2363 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2364 ret = dpaa_sec_prep_cdb(sess);
2366 DPAA_SEC_ERR("Unable to prepare sec cdb");
2369 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2370 ret = rte_dpaa_portal_init((void *)0);
2372 DPAA_SEC_ERR("Failure in affining portal");
2376 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2377 rte_dpaa_mem_vtop(&sess->cdb),
2378 qman_fq_fqid(&qp->outq));
2380 DPAA_SEC_ERR("Unable to init sec queue");
2386 free_session_data(dpaa_sec_session *s)
2389 rte_free(s->aead_key.data);
2391 rte_free(s->auth_key.data);
2392 rte_free(s->cipher_key.data);
2394 memset(s, 0, sizeof(dpaa_sec_session));
2398 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2399 struct rte_crypto_sym_xform *xform, void *sess)
2401 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2402 dpaa_sec_session *session = sess;
2406 PMD_INIT_FUNC_TRACE();
2408 if (unlikely(sess == NULL)) {
2409 DPAA_SEC_ERR("invalid session struct");
2412 memset(session, 0, sizeof(dpaa_sec_session));
2414 /* Default IV length = 0 */
2415 session->iv.length = 0;
2418 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2419 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2420 ret = dpaa_sec_cipher_init(dev, xform, session);
2422 /* Authentication Only */
2423 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2424 xform->next == NULL) {
2425 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2426 session->ctxt = DPAA_SEC_AUTH;
2427 ret = dpaa_sec_auth_init(dev, xform, session);
2429 /* Cipher then Authenticate */
2430 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2431 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2432 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2433 session->auth_cipher_text = 1;
2434 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2435 ret = dpaa_sec_auth_init(dev, xform, session);
2436 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2437 ret = dpaa_sec_cipher_init(dev, xform, session);
2439 ret = dpaa_sec_chain_init(dev, xform, session);
2441 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2444 /* Authenticate then Cipher */
2445 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2446 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2447 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2448 session->auth_cipher_text = 0;
2449 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2450 ret = dpaa_sec_cipher_init(dev, xform, session);
2451 else if (xform->next->cipher.algo
2452 == RTE_CRYPTO_CIPHER_NULL)
2453 ret = dpaa_sec_auth_init(dev, xform, session);
2455 ret = dpaa_sec_chain_init(dev, xform, session);
2457 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2461 /* AEAD operation for AES-GCM-type algorithms */
2462 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2463 xform->next == NULL) {
2464 ret = dpaa_sec_aead_init(dev, xform, session);
2467 DPAA_SEC_ERR("Invalid crypto type");
2471 DPAA_SEC_ERR("unable to init session");
2475 rte_spinlock_lock(&internals->lock);
2476 for (i = 0; i < MAX_DPAA_CORES; i++) {
2477 session->inq[i] = dpaa_sec_attach_rxq(internals);
2478 if (session->inq[i] == NULL) {
2479 DPAA_SEC_ERR("unable to attach sec queue");
2480 rte_spinlock_unlock(&internals->lock);
2485 rte_spinlock_unlock(&internals->lock);
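/*
 * Each session reserves one Rx (SEC input) FQ per possible lcore so every
 * core can submit to SEC on its own queue without locking; the FQs
 * themselves are initialized lazily in dpaa_sec_attach_sess_q() on first
 * use from the enqueue path.
 */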
2490 free_session_data(session);
2495 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2496 struct rte_crypto_sym_xform *xform,
2497 struct rte_cryptodev_sym_session *sess,
2498 struct rte_mempool *mempool)
2500 void *sess_private_data;
2503 PMD_INIT_FUNC_TRACE();
2505 if (rte_mempool_get(mempool, &sess_private_data)) {
2506 DPAA_SEC_ERR("Couldn't get object from session mempool");
2510 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2512 DPAA_SEC_ERR("failed to configure session parameters");
2514 /* Return session to mempool */
2515 rte_mempool_put(mempool, sess_private_data);
2519 set_sym_session_private_data(sess, dev->driver_id,
2527 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2529 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2530 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2533 for (i = 0; i < MAX_DPAA_CORES; i++) {
2535 dpaa_sec_detach_rxq(qi, s->inq[i]);
2539 free_session_data(s);
2540 rte_mempool_put(sess_mp, (void *)s);
2543 /** Clear the session memory so it doesn't leave key material behind */
2545 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2546 struct rte_cryptodev_sym_session *sess)
2548 PMD_INIT_FUNC_TRACE();
2549 uint8_t index = dev->driver_id;
2550 void *sess_priv = get_sym_session_private_data(sess, index);
2551 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2554 free_session_memory(dev, s);
2555 set_sym_session_private_data(sess, index, NULL);
2559 #ifdef RTE_LIB_SECURITY
2561 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2562 struct rte_security_ipsec_xform *ipsec_xform,
2563 dpaa_sec_session *session)
2565 PMD_INIT_FUNC_TRACE();
2567 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2568 RTE_CACHE_LINE_SIZE);
2569 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2570 DPAA_SEC_ERR("No Memory for aead key");
2573 memcpy(session->aead_key.data, aead_xform->key.data,
2574 aead_xform->key.length);
2576 session->digest_length = aead_xform->digest_length;
2577 session->aead_key.length = aead_xform->key.length;
2579 switch (aead_xform->algo) {
2580 case RTE_CRYPTO_AEAD_AES_GCM:
2581 switch (session->digest_length) {
2583 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2586 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2589 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2592 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2593 session->digest_length);
2596 if (session->dir == DIR_ENC) {
2597 memcpy(session->encap_pdb.gcm.salt,
2598 (uint8_t *)&(ipsec_xform->salt), 4);
2600 memcpy(session->decap_pdb.gcm.salt,
2601 (uint8_t *)&(ipsec_xform->salt), 4);
2603 session->aead_key.algmode = OP_ALG_AAI_GCM;
2604 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
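/*
 * For IPsec ESP with AES-GCM the 4-byte salt from the security xform is
 * kept in the encap/decap PDB; SEC concatenates it with the per-packet IV
 * to build the nonce, per RFC 4106.
 */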
2607 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2615 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2616 struct rte_crypto_auth_xform *auth_xform,
2617 struct rte_security_ipsec_xform *ipsec_xform,
2618 dpaa_sec_session *session)
2620 if (cipher_xform) {
2621 session->cipher_key.data = rte_zmalloc(NULL,
2622 cipher_xform->key.length,
2623 RTE_CACHE_LINE_SIZE);
2624 if (session->cipher_key.data == NULL &&
2625 cipher_xform->key.length > 0) {
2626 DPAA_SEC_ERR("No Memory for cipher key");
2630 session->cipher_key.length = cipher_xform->key.length;
2631 memcpy(session->cipher_key.data, cipher_xform->key.data,
2632 cipher_xform->key.length);
2633 session->cipher_alg = cipher_xform->algo;
2634 } else {
2635 session->cipher_key.data = NULL;
2636 session->cipher_key.length = 0;
2637 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2638 }
2640 if (auth_xform) {
2641 session->auth_key.data = rte_zmalloc(NULL,
2642 auth_xform->key.length,
2643 RTE_CACHE_LINE_SIZE);
2644 if (session->auth_key.data == NULL &&
2645 auth_xform->key.length > 0) {
2646 DPAA_SEC_ERR("No Memory for auth key");
2649 session->auth_key.length = auth_xform->key.length;
2650 memcpy(session->auth_key.data, auth_xform->key.data,
2651 auth_xform->key.length);
2652 session->auth_alg = auth_xform->algo;
2653 session->digest_length = auth_xform->digest_length;
2654 } else {
2655 session->auth_key.data = NULL;
2656 session->auth_key.length = 0;
2657 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2658 }
2660 switch (session->auth_alg) {
2661 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2662 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2663 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2664 break;
2665 case RTE_CRYPTO_AUTH_MD5_HMAC:
2666 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2667 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2668 break;
2669 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2670 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2671 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2672 if (session->digest_length != 16)
2673 DPAA_SEC_WARN(
2674 "Using SHA256-HMAC with a truncated digest length is "
2675 "non-standard and will not work with lookaside protocol offload");
2676 break;
2677 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2678 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2679 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2680 break;
2681 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2682 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2683 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2684 break;
2685 case RTE_CRYPTO_AUTH_AES_CMAC:
2686 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2687 break;
2688 case RTE_CRYPTO_AUTH_NULL:
2689 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2690 break;
2691 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2692 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2693 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2694 case RTE_CRYPTO_AUTH_SHA1:
2695 case RTE_CRYPTO_AUTH_SHA256:
2696 case RTE_CRYPTO_AUTH_SHA512:
2697 case RTE_CRYPTO_AUTH_SHA224:
2698 case RTE_CRYPTO_AUTH_SHA384:
2699 case RTE_CRYPTO_AUTH_MD5:
2700 case RTE_CRYPTO_AUTH_AES_GMAC:
2701 case RTE_CRYPTO_AUTH_KASUMI_F9:
2702 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2703 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2704 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2705 session->auth_alg);
2706 return -ENOTSUP;
2707 default:
2708 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2709 session->auth_alg);
2710 return -ENOTSUP;
2711 }
2713 switch (session->cipher_alg) {
2714 case RTE_CRYPTO_CIPHER_AES_CBC:
2715 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2716 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2717 break;
2718 case RTE_CRYPTO_CIPHER_DES_CBC:
2719 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2720 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2721 break;
2722 case RTE_CRYPTO_CIPHER_3DES_CBC:
2723 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2724 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2725 break;
2726 case RTE_CRYPTO_CIPHER_AES_CTR:
2727 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2728 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2729 if (session->dir == DIR_ENC) {
2730 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2731 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2732 } else {
2733 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2734 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2735 }
2736 break;
2737 case RTE_CRYPTO_CIPHER_NULL:
2738 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2739 break;
2740 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2741 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2742 case RTE_CRYPTO_CIPHER_3DES_ECB:
2743 case RTE_CRYPTO_CIPHER_AES_ECB:
2744 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2745 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2746 session->cipher_alg);
2747 return -ENOTSUP;
2748 default:
2749 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2750 session->cipher_alg);
2751 return -ENOTSUP;
2752 }
2754 return 0;
2755 }
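/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * creating a lookaside-protocol IPsec session through rte_security.
 * `dev_id`, `sess_mp` and `aead_xform` are hypothetical application-side
 * names; depending on the DPDK release, rte_security_session_create() may
 * also take a separate private-data mempool argument.
 *
 *	struct rte_security_ctx *sec_ctx =
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.salt = 0,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead_xform,
 *	};
 *	struct rte_security_session *ss =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 * The call reaches dpaa_sec_set_ipsec_session() below.
 */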
2758 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2759 struct rte_security_session_conf *conf,
2760 void *sess)
2762 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2763 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2764 struct rte_crypto_auth_xform *auth_xform = NULL;
2765 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2766 struct rte_crypto_aead_xform *aead_xform = NULL;
2767 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2771 PMD_INIT_FUNC_TRACE();
2773 memset(session, 0, sizeof(dpaa_sec_session));
2774 session->proto_alg = conf->protocol;
2775 session->ctxt = DPAA_SEC_IPSEC;
2777 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2778 session->dir = DIR_ENC;
2779 else
2780 session->dir = DIR_DEC;
2782 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2783 cipher_xform = &conf->crypto_xform->cipher;
2784 if (conf->crypto_xform->next)
2785 auth_xform = &conf->crypto_xform->next->auth;
2786 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2787 ipsec_xform, session);
2788 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2789 auth_xform = &conf->crypto_xform->auth;
2790 if (conf->crypto_xform->next)
2791 cipher_xform = &conf->crypto_xform->next->cipher;
2792 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2793 ipsec_xform, session);
2794 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2795 aead_xform = &conf->crypto_xform->aead;
2796 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2797 ipsec_xform, session);
2799 DPAA_SEC_ERR("XFORM not specified");
2804 DPAA_SEC_ERR("Failed to process xform");
2808 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2809 if (ipsec_xform->tunnel.type ==
2810 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2811 session->ip4_hdr.ip_v = IPVERSION;
2812 session->ip4_hdr.ip_hl = 5;
2813 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2814 sizeof(session->ip4_hdr));
2815 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2816 session->ip4_hdr.ip_id = 0;
2817 session->ip4_hdr.ip_off = 0;
2818 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2819 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2820 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2821 IPPROTO_ESP : IPPROTO_AH;
2822 session->ip4_hdr.ip_sum = 0;
2823 session->ip4_hdr.ip_src =
2824 ipsec_xform->tunnel.ipv4.src_ip;
2825 session->ip4_hdr.ip_dst =
2826 ipsec_xform->tunnel.ipv4.dst_ip;
2827 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2828 (void *)&session->ip4_hdr,
2830 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2831 } else if (ipsec_xform->tunnel.type ==
2832 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2833 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2834 DPAA_IPv6_DEFAULT_VTC_FLOW |
2835 ((ipsec_xform->tunnel.ipv6.dscp <<
2836 RTE_IPV6_HDR_TC_SHIFT) &
2837 RTE_IPV6_HDR_TC_MASK) |
2838 ((ipsec_xform->tunnel.ipv6.flabel <<
2839 RTE_IPV6_HDR_FL_SHIFT) &
2840 RTE_IPV6_HDR_FL_MASK));
2841 /* Payload length will be updated by HW */
2842 session->ip6_hdr.payload_len = 0;
2843 session->ip6_hdr.hop_limits =
2844 ipsec_xform->tunnel.ipv6.hlimit;
2845 session->ip6_hdr.proto = (ipsec_xform->proto ==
2846 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2847 IPPROTO_ESP : IPPROTO_AH;
2848 memcpy(&session->ip6_hdr.src_addr,
2849 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2850 memcpy(&session->ip6_hdr.dst_addr,
2851 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2852 session->encap_pdb.ip_hdr_len =
2853 sizeof(struct rte_ipv6_hdr);
2854 }
2855 session->encap_pdb.options =
2856 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2857 PDBOPTS_ESP_OIHI_PDB_INL |
2858 PDBOPTS_ESP_IVSRC |
2859 PDBHMO_ESP_ENCAP_DTTL |
2860 PDBHMO_ESP_SNR;
2861 if (ipsec_xform->options.esn)
2862 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2863 session->encap_pdb.spi = ipsec_xform->spi;
2865 } else if (ipsec_xform->direction ==
2866 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2867 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2868 session->decap_pdb.options = sizeof(struct ip) << 16;
2869 else
2870 session->decap_pdb.options =
2871 sizeof(struct rte_ipv6_hdr) << 16;
2872 if (ipsec_xform->options.esn)
2873 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
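/* Round the requested anti-replay window up to the nearest SEC PDB
 * window size; the descriptor supports 32-, 64- and 128-entry windows.
 */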
2874 if (ipsec_xform->replay_win_sz) {
2875 uint32_t win_sz;
2876 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2878 switch (win_sz) {
2879 case 1:
2880 case 2:
2881 case 4:
2882 case 8:
2883 case 16:
2884 case 32:
2885 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2886 break;
2887 case 64:
2888 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2889 break;
2890 default:
2891 session->decap_pdb.options |= PDBOPTS_ESP_ARS128;
2892 break;
2893 }
2894 }
2897 rte_spinlock_lock(&internals->lock);
2898 for (i = 0; i < MAX_DPAA_CORES; i++) {
2899 session->inq[i] = dpaa_sec_attach_rxq(internals);
2900 if (session->inq[i] == NULL) {
2901 DPAA_SEC_ERR("unable to attach sec queue");
2902 rte_spinlock_unlock(&internals->lock);
2906 rte_spinlock_unlock(&internals->lock);
2910 free_session_data(session);
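/*
 * Descriptive note (editor's addition): PDCP sessions follow the same
 * shape as the IPsec path above, but the SEC descriptor is parameterized
 * from the rte_security PDCP xform (domain, bearer, packet direction,
 * SN size, HFN and its threshold) instead of an SA. An application fills
 * conf->protocol = RTE_SECURITY_PROTOCOL_PDCP and conf->pdcp, then calls
 * rte_security_session_create() exactly as in the IPsec sketch earlier
 * in this file.
 */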
2915 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2916 struct rte_security_session_conf *conf,
2917 void *sess)
2919 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2920 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2921 struct rte_crypto_auth_xform *auth_xform = NULL;
2922 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2923 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2924 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2928 PMD_INIT_FUNC_TRACE();
2930 memset(session, 0, sizeof(dpaa_sec_session));
2932 /* find xfrm types */
2933 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2934 cipher_xform = &xform->cipher;
2935 if (xform->next != NULL)
2936 auth_xform = &xform->next->auth;
2937 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2938 auth_xform = &xform->auth;
2939 if (xform->next != NULL)
2940 cipher_xform = &xform->next->cipher;
2942 DPAA_SEC_ERR("Invalid crypto type");
2946 session->proto_alg = conf->protocol;
2947 session->ctxt = DPAA_SEC_PDCP;
2949 if (cipher_xform) {
2950 switch (cipher_xform->algo) {
2951 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2952 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2953 break;
2954 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2955 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2956 break;
2957 case RTE_CRYPTO_CIPHER_AES_CTR:
2958 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2959 break;
2960 case RTE_CRYPTO_CIPHER_NULL:
2961 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2962 break;
2963 default:
2964 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2965 session->cipher_alg);
2966 return -EINVAL;
2967 }
2969 session->cipher_key.data = rte_zmalloc(NULL,
2970 cipher_xform->key.length,
2971 RTE_CACHE_LINE_SIZE);
2972 if (session->cipher_key.data == NULL &&
2973 cipher_xform->key.length > 0) {
2974 DPAA_SEC_ERR("No Memory for cipher key");
2977 session->cipher_key.length = cipher_xform->key.length;
2978 memcpy(session->cipher_key.data, cipher_xform->key.data,
2979 cipher_xform->key.length);
2980 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2981 DIR_ENC : DIR_DEC;
2982 session->cipher_alg = cipher_xform->algo;
2983 } else {
2984 session->cipher_key.data = NULL;
2985 session->cipher_key.length = 0;
2986 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2987 session->dir = DIR_ENC;
2988 }
2990 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2991 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2992 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2994 "PDCP Seq Num size should be 5/12 bits for cmode");
3000 if (auth_xform) {
3001 switch (auth_xform->algo) {
3002 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3003 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3004 break;
3005 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3006 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3007 break;
3008 case RTE_CRYPTO_AUTH_AES_CMAC:
3009 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3010 break;
3011 case RTE_CRYPTO_AUTH_NULL:
3012 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3013 break;
3014 default:
3015 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3016 auth_xform->algo);
3017 rte_free(session->cipher_key.data);
3018 return -ENOTSUP;
3019 }
3020 session->auth_key.data = rte_zmalloc(NULL,
3021 auth_xform->key.length,
3022 RTE_CACHE_LINE_SIZE);
3023 if (!session->auth_key.data &&
3024 auth_xform->key.length > 0) {
3025 DPAA_SEC_ERR("No Memory for auth key");
3026 rte_free(session->cipher_key.data);
3029 session->auth_key.length = auth_xform->key.length;
3030 memcpy(session->auth_key.data, auth_xform->key.data,
3031 auth_xform->key.length);
3032 session->auth_alg = auth_xform->algo;
3033 } else {
3034 session->auth_key.data = NULL;
3035 session->auth_key.length = 0;
3036 session->auth_alg = 0;
3037 }
3038 session->pdcp.domain = pdcp_xform->domain;
3039 session->pdcp.bearer = pdcp_xform->bearer;
3040 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3041 session->pdcp.sn_size = pdcp_xform->sn_size;
3042 session->pdcp.hfn = pdcp_xform->hfn;
3043 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3044 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3045 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
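/* When HFN override is enabled, the per-packet HFN is supplied by the
 * application inside the crypto op at the IV offset recorded here.
 */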
3047 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3049 rte_spinlock_lock(&dev_priv->lock);
3050 for (i = 0; i < MAX_DPAA_CORES; i++) {
3051 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3052 if (session->inq[i] == NULL) {
3053 DPAA_SEC_ERR("unable to attach sec queue");
3054 rte_spinlock_unlock(&dev_priv->lock);
3059 rte_spinlock_unlock(&dev_priv->lock);
3062 rte_free(session->auth_key.data);
3063 rte_free(session->cipher_key.data);
3064 memset(session, 0, sizeof(dpaa_sec_session));
3069 dpaa_sec_security_session_create(void *dev,
3070 struct rte_security_session_conf *conf,
3071 struct rte_security_session *sess,
3072 struct rte_mempool *mempool)
3074 void *sess_private_data;
3075 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3078 if (rte_mempool_get(mempool, &sess_private_data)) {
3079 DPAA_SEC_ERR("Couldn't get object from session mempool");
3083 switch (conf->protocol) {
3084 case RTE_SECURITY_PROTOCOL_IPSEC:
3085 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3086 sess_private_data);
3087 break;
3088 case RTE_SECURITY_PROTOCOL_PDCP:
3089 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3090 sess_private_data);
3091 break;
3092 case RTE_SECURITY_PROTOCOL_MACSEC:
3093 return -ENOTSUP;
3094 default:
3095 return -EINVAL;
3096 }
3097 if (ret != 0) {
3098 DPAA_SEC_ERR("failed to configure session parameters");
3099 /* Return session to mempool */
3100 rte_mempool_put(mempool, sess_private_data);
3101 return ret;
3102 }
3104 set_sec_session_private_data(sess, sess_private_data);
3106 return 0;
3107 }
3109 /** Clear the memory of the session so it doesn't leave key material behind */
3111 dpaa_sec_security_session_destroy(void *dev,
3112 struct rte_security_session *sess)
3114 PMD_INIT_FUNC_TRACE();
3115 void *sess_priv = get_sec_session_private_data(sess);
3116 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3118 if (sess_priv) {
3119 free_session_memory((struct rte_cryptodev *)dev, s);
3120 set_sec_session_private_data(sess, NULL);
3121 }
3126 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3127 struct rte_cryptodev_config *config __rte_unused)
3129 PMD_INIT_FUNC_TRACE();
3135 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3137 PMD_INIT_FUNC_TRACE();
3142 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3144 PMD_INIT_FUNC_TRACE();
3148 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3150 PMD_INIT_FUNC_TRACE();
3159 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3160 struct rte_cryptodev_info *info)
3162 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3164 PMD_INIT_FUNC_TRACE();
3166 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3167 info->feature_flags = dev->feature_flags;
3168 info->capabilities = dpaa_sec_capabilities;
3169 info->sym.max_nb_sessions = internals->max_nb_sessions;
3170 info->driver_id = cryptodev_driver_id;
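/* Event-mode dequeue path: the QMan portal invokes the callbacks below
 * when a frame is dequeued from a SEC output FQ. They map the frame back
 * to its dpaa_sec_op_ctx, propagate the completion status, and wrap the
 * crypto op in an rte_event for the application's event loop.
 */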
3174 static enum qman_cb_dqrr_result
3175 dpaa_sec_process_parallel_event(void *event,
3176 struct qman_portal *qm __always_unused,
3177 struct qman_fq *outq,
3178 const struct qm_dqrr_entry *dqrr,
3181 const struct qm_fd *fd;
3182 struct dpaa_sec_job *job;
3183 struct dpaa_sec_op_ctx *ctx;
3184 struct rte_event *ev = (struct rte_event *)event;
3186 fd = &dqrr->fd;
3188 /* sg is embedded in an op ctx,
3189 * sg[0] is for output,
3190 * sg[1] is for input
3191 */
3192 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3194 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3195 ctx->fd_status = fd->status;
3196 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3197 struct qm_sg_entry *sg_out;
3198 uint32_t len;
3200 sg_out = &job->sg[0];
3201 hw_sg_to_cpu(sg_out);
3202 len = sg_out->length;
3203 ctx->op->sym->m_src->pkt_len = len;
3204 ctx->op->sym->m_src->data_len = len;
3205 }
3206 if (!ctx->fd_status) {
3207 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3208 } else {
3209 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3210 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3211 }
3212 ev->event_ptr = (void *)ctx->op;
3214 ev->flow_id = outq->ev.flow_id;
3215 ev->sub_event_type = outq->ev.sub_event_type;
3216 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3217 ev->op = RTE_EVENT_OP_NEW;
3218 ev->sched_type = outq->ev.sched_type;
3219 ev->queue_id = outq->ev.queue_id;
3220 ev->priority = outq->ev.priority;
3221 *bufs = (void *)ctx->op;
3223 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3225 return qman_cb_dqrr_consume;
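/* Atomic variant: same result handling as the parallel callback, but the
 * DQRR entry is held active (qman_cb_dqrr_defer) so that per-flow order
 * is preserved until the application releases the event.
 */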
3228 static enum qman_cb_dqrr_result
3229 dpaa_sec_process_atomic_event(void *event,
3230 struct qman_portal *qm __rte_unused,
3231 struct qman_fq *outq,
3232 const struct qm_dqrr_entry *dqrr,
3236 const struct qm_fd *fd;
3237 struct dpaa_sec_job *job;
3238 struct dpaa_sec_op_ctx *ctx;
3239 struct rte_event *ev = (struct rte_event *)event;
3240 u8 index;
3241 fd = &dqrr->fd;
3243 /* sg is embedded in an op ctx,
3244 * sg[0] is for output,
3245 * sg[1] is for input
3246 */
3247 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3249 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3250 ctx->fd_status = fd->status;
3251 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3252 struct qm_sg_entry *sg_out;
3253 uint32_t len;
3255 sg_out = &job->sg[0];
3256 hw_sg_to_cpu(sg_out);
3257 len = sg_out->length;
3258 ctx->op->sym->m_src->pkt_len = len;
3259 ctx->op->sym->m_src->data_len = len;
3260 }
3261 if (!ctx->fd_status) {
3262 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3263 } else {
3264 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3265 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3266 }
3267 ev->event_ptr = (void *)ctx->op;
3268 ev->flow_id = outq->ev.flow_id;
3269 ev->sub_event_type = outq->ev.sub_event_type;
3270 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3271 ev->op = RTE_EVENT_OP_NEW;
3272 ev->sched_type = outq->ev.sched_type;
3273 ev->queue_id = outq->ev.queue_id;
3274 ev->priority = outq->ev.priority;
3276 /* Save active dqrr entries */
3277 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3278 DPAA_PER_LCORE_DQRR_SIZE++;
3279 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3280 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3281 ev->impl_opaque = index + 1;
3282 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3283 *bufs = (void *)ctx->op;
3285 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3287 return qman_cb_dqrr_defer;
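/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * binding a queue pair of this device to an event queue via the event
 * crypto adapter, which ends up in dpaa_sec_eventq_attach() below.
 * `adapter_id`, `cdev_id`, `qp_id` and `ev_qid` are hypothetical
 * application-side names; newer DPDK releases pass a queue conf structure
 * instead of a bare rte_event.
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_qid,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						qp_id, &ev);
 */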
3291 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3292 int qp_id,
3293 uint16_t ch_id,
3294 const struct rte_event *event)
3296 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3297 struct qm_mcc_initfq opts = {0};
3298 int ret;
3301 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3302 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3303 opts.fqd.dest.channel = ch_id;
3305 switch (event->sched_type) {
3306 case RTE_SCHED_TYPE_ATOMIC:
3307 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3308 /* Reset the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
3309 * configuration in combination with the HOLDACTIVE setting
3310 */
3311 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3312 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3313 break;
3314 case RTE_SCHED_TYPE_ORDERED:
3315 DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3316 return -ENOTSUP;
3317 default:
3318 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3319 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3320 break;
3321 }
3323 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3324 if (unlikely(ret)) {
3325 DPAA_SEC_ERR("unable to init caam source fq!");
3329 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3335 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3336 int qp_id)
3338 struct qm_mcc_initfq opts = {0};
3339 int ret;
3340 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3342 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3343 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3344 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3345 qp->outq.cb.ern = ern_sec_fq_handler;
3346 qman_retire_fq(&qp->outq, NULL);
3347 qman_oos_fq(&qp->outq);
3348 ret = qman_init_fq(&qp->outq, 0, &opts);
3350 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3351 qp->outq.cb.dqrr = NULL;
3356 static struct rte_cryptodev_ops crypto_ops = {
3357 .dev_configure = dpaa_sec_dev_configure,
3358 .dev_start = dpaa_sec_dev_start,
3359 .dev_stop = dpaa_sec_dev_stop,
3360 .dev_close = dpaa_sec_dev_close,
3361 .dev_infos_get = dpaa_sec_dev_infos_get,
3362 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3363 .queue_pair_release = dpaa_sec_queue_pair_release,
3364 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3365 .sym_session_configure = dpaa_sec_sym_session_configure,
3366 .sym_session_clear = dpaa_sec_sym_session_clear
3367 };
3369 #ifdef RTE_LIB_SECURITY
3370 static const struct rte_security_capability *
3371 dpaa_sec_capabilities_get(void *device __rte_unused)
3373 return dpaa_sec_security_cap;
3376 static const struct rte_security_ops dpaa_sec_security_ops = {
3377 .session_create = dpaa_sec_security_session_create,
3378 .session_update = NULL,
3379 .session_stats_get = NULL,
3380 .session_destroy = dpaa_sec_security_session_destroy,
3381 .set_pkt_metadata = NULL,
3382 .capabilities_get = dpaa_sec_capabilities_get
3383 };
3386 dpaa_sec_uninit(struct rte_cryptodev *dev)
3388 struct dpaa_sec_dev_private *internals;
3393 internals = dev->data->dev_private;
3394 rte_free(dev->security_ctx);
3396 rte_free(internals);
3398 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3399 dev->data->name, rte_socket_id());
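/* Probe-time initialization: wire up the burst functions and feature
 * flags, create the per-device TX/RX frame queues that sessions attach
 * to and, in the primary process only, the rte_security context.
 */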
3405 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3407 struct dpaa_sec_dev_private *internals;
3408 #ifdef RTE_LIB_SECURITY
3409 struct rte_security_ctx *security_instance;
3411 struct dpaa_sec_qp *qp;
3415 PMD_INIT_FUNC_TRACE();
3417 cryptodev->driver_id = cryptodev_driver_id;
3418 cryptodev->dev_ops = &crypto_ops;
3420 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3421 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3422 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3423 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3424 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3425 RTE_CRYPTODEV_FF_SECURITY |
3426 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3427 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3428 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3429 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3430 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3432 internals = cryptodev->data->dev_private;
3433 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3434 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3436 /*
3437 * For secondary processes, we don't initialise any further, as the
3438 * primary process has already done this work.
3439 */
3441 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3442 DPAA_SEC_WARN("Device already init by primary process");
3445 #ifdef RTE_LIB_SECURITY
3446 /* Initialize security_ctx only for primary process */
3447 security_instance = rte_malloc("rte_security_instances_ops",
3448 sizeof(struct rte_security_ctx), 0);
3449 if (security_instance == NULL)
3450 return -ENOMEM;
3451 security_instance->device = (void *)cryptodev;
3452 security_instance->ops = &dpaa_sec_security_ops;
3453 security_instance->sess_cnt = 0;
3454 cryptodev->security_ctx = security_instance;
3456 rte_spinlock_init(&internals->lock);
3457 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3458 /* init qman fq for queue pair */
3459 qp = &internals->qps[i];
3460 ret = dpaa_sec_init_tx(&qp->outq);
3462 DPAA_SEC_ERR("config tx of queue pair %d", i);
3467 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3468 QMAN_FQ_FLAG_TO_DCPORTAL;
3469 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3470 /* create rx qman fq for sessions*/
3471 ret = qman_create_fq(0, flags, &internals->inq[i]);
3472 if (unlikely(ret != 0)) {
3473 DPAA_SEC_ERR("sec qman_create_fq failed");
3478 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3482 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3484 rte_free(cryptodev->security_ctx);
3489 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3490 struct rte_dpaa_device *dpaa_dev)
3492 struct rte_cryptodev *cryptodev;
3493 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3497 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3499 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3500 if (cryptodev == NULL)
3501 return -ENOMEM;
3503 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3504 cryptodev->data->dev_private = rte_zmalloc_socket(
3505 "cryptodev private structure",
3506 sizeof(struct dpaa_sec_dev_private),
3507 RTE_CACHE_LINE_SIZE,
3510 if (cryptodev->data->dev_private == NULL)
3511 rte_panic("Cannot allocate memzone for private "
3515 dpaa_dev->crypto_dev = cryptodev;
3516 cryptodev->device = &dpaa_dev->device;
3518 /* init user callbacks */
3519 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3521 /* if sec device version is not configured */
3522 if (!rta_get_sec_era()) {
3523 const struct device_node *caam_node;
3525 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3526 const uint32_t *prop = of_get_property(caam_node,
3527 "fsl,sec-era",
3528 NULL);
3529 if (prop) {
3530 rta_set_sec_era(
3531 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3532 break;
3533 }
3534 }
3537 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3538 retval = rte_dpaa_portal_init((void *)1);
3540 DPAA_SEC_ERR("Unable to initialize portal");
3545 /* Invoke PMD device initialization function */
3546 retval = dpaa_sec_dev_init(cryptodev);
3552 /* In case of error, cleanup is done */
3553 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3554 rte_free(cryptodev->data->dev_private);
3556 rte_cryptodev_pmd_release_device(cryptodev);
3562 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3564 struct rte_cryptodev *cryptodev;
3567 cryptodev = dpaa_dev->crypto_dev;
3568 if (cryptodev == NULL)
3569 return -ENODEV;
3571 ret = dpaa_sec_uninit(cryptodev);
3572 if (ret)
3573 return ret;
3575 return rte_cryptodev_pmd_destroy(cryptodev);
3578 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3579 .drv_type = FSL_DPAA_CRYPTO,
3581 .name = "DPAA SEC PMD"
3583 .probe = cryptodev_dpaa_sec_probe,
3584 .remove = cryptodev_dpaa_sec_remove,
3587 static struct cryptodev_driver dpaa_sec_crypto_drv;
3589 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3590 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3591 cryptodev_driver_id);
3592 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);