/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2021 NXP
 *
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, so it is called four times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset() is costlier than dcbz_64().
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
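/*
 * Illustrative sketch, not part of the driver: the clearing loop above steps
 * by 4 because four 16-byte SG entries share one 64-byte cache line and each
 * dcbz_64() zeroes exactly one line. A plain-C equivalent (assuming a
 * 16-entry job) of what the dcbz_64() calls achieve would be:
 *
 *	struct qm_sg_entry sg[16];		// 16 * 16 B = 256 B
 *	for (int i = 0; i < 16; i += 4)		// one iteration per line
 *		memset(&sg[i], 0, 64);		// dcbz_64() does this in hw
 *
 * dcbz_64() avoids the read-for-ownership traffic that memset() would incur
 * on every packet.
 */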
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the CAAM channel so that
 * all the packets on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}
/* Jobs are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;
		struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
				ctx->op->sym->m_src : ctx->op->sym->m_dst;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		mbuf->pkt_len = len;
		while (mbuf->next != NULL) {
			len -= mbuf->data_len;
			mbuf = mbuf->next;
		}
		mbuf->data_len = len;
	}
	DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
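/*
 * Illustration (inferred from the code above, not driver source): every job
 * handed to CAAM is a compound frame whose first two SG entries live inside
 * the per-op context:
 *
 *	struct dpaa_sec_job *job;
 *	struct qm_sg_entry *out = &job->sg[0];	// output buffer(s)
 *	struct qm_sg_entry *in  = &job->sg[1];	// input buffer(s)
 *
 * which is why container_of(job, struct dpaa_sec_op_ctx, job) recovers the
 * per-op context from the frame descriptor address alone.
 */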
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
	struct alginfo authdata = {0}, cipherdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	struct alginfo *p_authdata = NULL;
	int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_alg) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;

		p_authdata = &authdata;
	}

	if (ses->pdcp.sdap_enabled) {
		int nb_keys_to_inline =
				rta_inline_pdcp_sdap_query(authdata.algtype,
						cipherdata.algtype,
						ses->pdcp.sn_size,
						ses->pdcp.hfn_ovd);
		if (nb_keys_to_inline >= 1) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
		if (nb_keys_to_inline >= 2) {
			authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)authdata.key);
			authdata.key_type = RTA_DATA_PTR;
		}
	} else {
		if (rta_inline_pdcp_query(authdata.algtype,
					cipherdata.algtype,
					ses->pdcp.sn_size,
					ses->pdcp.hfn_ovd)) {
			cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
						(size_t)cipherdata.key);
			cipherdata.key_type = RTA_DATA_PTR;
		}
	}

	if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (ses->dir == DIR_DEC)
			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
					cdb->sh_desc, 1, swap,
					ses->pdcp.hfn,
					ses->pdcp.sn_size,
					ses->pdcp.bearer,
					ses->pdcp.pkt_dir,
					ses->pdcp.hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (ses->dir == DIR_ENC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_encap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		} else if (ses->dir == DIR_DEC) {
			if (ses->pdcp.sdap_enabled)
				shared_desc_len =
					cnstr_shdsc_pdcp_sdap_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
			else
				shared_desc_len =
					cnstr_shdsc_pdcp_u_plane_decap(
						cdb->sh_desc, 1, swap,
						ses->pdcp.sn_size,
						ses->pdcp.hfn,
						ses->pdcp.bearer,
						ses->pdcp.pkt_dir,
						ses->pdcp.hfn_threshold,
						&cipherdata, p_authdata, 0);
		}
	}
	return shared_desc_len;
}
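/*
 * Note (illustrative, inferred from the code above): the rta_inline queries
 * decide whether key material can be embedded directly in the shared
 * descriptor. A non-zero rta_inline_pdcp_query() result forces the cipher
 * key out of line:
 *
 *	cipherdata.key = rte_dpaa_mem_vtop(key_va);	// virt -> I/O addr
 *	cipherdata.key_type = RTA_DATA_PTR;		// reference, not copy
 *
 * The SDAP variant instead reports how many keys (0, 1 or 2) must be moved
 * out of line, cipher key first, then auth key.
 */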
/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
	struct alginfo cipherdata = {0}, authdata = {0};
	struct sec_cdb *cdb = &ses->cdb;
	int32_t shared_desc_len = 0;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	cipherdata.key = (size_t)ses->cipher_key.data;
	cipherdata.keylen = ses->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	cipherdata.algtype = ses->cipher_key.alg;
	cipherdata.algmode = ses->cipher_key.algmode;

	if (ses->auth_key.length) {
		authdata.key = (size_t)ses->auth_key.data;
		authdata.keylen = ses->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;
		authdata.algtype = ses->auth_key.alg;
		authdata.algmode = ses->auth_key.algmode;
	}

	cdb->sh_desc[0] = cipherdata.keylen;
	cdb->sh_desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       DESC_JOB_IO_LEN,
			       (unsigned int *)cdb->sh_desc,
			       &cdb->sh_desc[2], 2);

	if (err < 0) {
		DPAA_SEC_ERR("Crypto: Incorrect key lengths");
		return err;
	}
	if (cdb->sh_desc[2] & 1)
		cipherdata.key_type = RTA_DATA_IMM;
	else {
		cipherdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (cdb->sh_desc[2] & (1 << 1))
		authdata.key_type = RTA_DATA_IMM;
	else {
		authdata.key = (size_t)rte_dpaa_mem_vtop(
					(void *)(size_t)authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}

	if (ses->dir == DIR_ENC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_encap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->encap_pdb,
				(uint8_t *)&ses->ip4_hdr,
				&cipherdata, &authdata);
	} else if (ses->dir == DIR_DEC) {
		shared_desc_len = cnstr_shdsc_ipsec_new_decap(
				cdb->sh_desc,
				true, swap, SHR_SERIAL,
				&ses->decap_pdb,
				&cipherdata, &authdata);
	}
	return shared_desc_len;
}
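/*
 * Worked example (illustrative): rta_inline_query() writes a bitmask into
 * cdb->sh_desc[2], one bit per queried key length; bit i set means key i
 * still fits immediately in the descriptor. With the two lengths queried
 * above:
 *
 *	if (cdb->sh_desc[2] & (1 << 0))	// cipher key fits -> RTA_DATA_IMM
 *	if (cdb->sh_desc[2] & (1 << 1))	// auth key fits   -> RTA_DATA_IMM
 *
 * A cleared bit forces the key out of line: its virtual address is converted
 * with rte_dpaa_mem_vtop() and the key type set to RTA_DATA_PTR.
 */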
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
	case DPAA_SEC_IPSEC:
		shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
		break;
	case DPAA_SEC_PDCP:
		shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
		break;
#endif
	case DPAA_SEC_CIPHER:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		switch (ses->cipher_alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CTR:
		case RTE_CRYPTO_CIPHER_3DES_CTR:
			shared_desc_len = cnstr_shdsc_blkcipher(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_c,
					ses->iv.length,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			shared_desc_len = cnstr_shdsc_snow_f8(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			shared_desc_len = cnstr_shdsc_zuce(
					cdb->sh_desc, true, swap,
					&alginfo_c,
					ses->dir);
			break;
		default:
			DPAA_SEC_ERR("unsupported cipher alg %d",
				     ses->cipher_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AUTH:
		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;
		switch (ses->auth_alg) {
		case RTE_CRYPTO_AUTH_MD5:
		case RTE_CRYPTO_AUTH_SHA1:
		case RTE_CRYPTO_AUTH_SHA224:
		case RTE_CRYPTO_AUTH_SHA256:
		case RTE_CRYPTO_AUTH_SHA384:
		case RTE_CRYPTO_AUTH_SHA512:
			shared_desc_len = cnstr_shdsc_hash(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_MD5_HMAC:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA224_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_SHA384_HMAC:
		case RTE_CRYPTO_AUTH_SHA512_HMAC:
			shared_desc_len = cnstr_shdsc_hmac(
					cdb->sh_desc, true,
					swap, SHR_NEVER, &alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			shared_desc_len = cnstr_shdsc_snow_f9(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			shared_desc_len = cnstr_shdsc_zuca(
					cdb->sh_desc, true, swap,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		case RTE_CRYPTO_AUTH_AES_CMAC:
			shared_desc_len = cnstr_shdsc_aes_mac(
					cdb->sh_desc,
					true, swap, SHR_NEVER,
					&alginfo_a,
					!ses->dir,
					ses->digest_length);
			break;
		default:
			DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
			return -ENOTSUP;
		}
		break;
	case DPAA_SEC_AEAD:
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;
		alginfo.algtype = ses->aead_key.alg;
		alginfo.algmode = ses->aead_key.algmode;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap, SHR_NEVER,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		break;
	case DPAA_SEC_CIPHER_HASH:
		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;
		alginfo_c.algtype = ses->cipher_key.alg;
		alginfo_c.algmode = ses->cipher_key.algmode;

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;
		alginfo_a.algtype = ses->auth_key.alg;
		alginfo_a.algmode = ses->auth_key.algmode;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       DESC_JOB_IO_LEN,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}

		/* Auth_only_len is set as 0 here and it will be
		 * overwritten in fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
				ses->iv.length,
				ses->digest_length, ses->dir);
		break;
	case DPAA_SEC_HASH_CIPHER:
	default:
		DPAA_SEC_ERR("error: Unsupported session");
		return -ENOTSUP;
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;

	fq = &qp->outq;
	/*
	 * For requests of fewer than four buffers, we provide the exact
	 * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
	 * flag: without it QMan can provide two more buffers than
	 * requested, so we request two less in that case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;
			struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
					op->sym->m_src : op->sym->m_dst;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			mbuf->pkt_len = len;
			while (mbuf->next != NULL) {
				len -= mbuf->data_len;
				mbuf = mbuf->next;
			}
			mbuf->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
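/*
 * Worked example (illustrative): with nb_ops = 2 the QM_VDQCR_EXACT flag is
 * set and exactly 2 frames are requested. With nb_ops = 16 the flag is not
 * set, so QMan may deliver up to two frames more than asked; requesting
 * 16 - 2 = 14 keeps the worst case within the caller's ops[] array:
 *
 *	nb_ops = 2  -> qman_set_vdq(fq, 2,  QM_VDQCR_EXACT);	// == 2 frames
 *	nb_ops = 16 -> qman_set_vdq(fq, 14, 0);			// <= 16 frames
 */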
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;

	if (data_len <= (mbuf->data_len - data_offset)) {
		sg->length = data_len;
	} else {
		sg->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sg->length) &&
		       (mbuf = mbuf->next)) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
			if (data_len > mbuf->data_len)
				sg->length = mbuf->data_len;
			else
				sg->length = data_len;
		}
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = rte_dpaa_mem_vtop(old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 * Packet layout for the non-SG auth-only case:
 *
 *              |<-------- data_len ------->|
 *    |ip_header|ah_header|icv|payload......|
 *              ^
 *              mbuf data start
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *in_sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;
	int data_len, data_offset;

	data_len = sym->auth.data.length;
	data_offset = sym->auth.data.offset;

	if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	sg = &cf->sg[2];

	if (ses->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   ses->iv.offset);

		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sg->length = 12;
		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sg->length = 8;
		} else {
			sg->length = ses->iv.length;
		}
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
		in_sg->length += sg->length;
		cpu_to_hw_sg(sg);
		sg++;
	}

	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = data_offset;
	sg->length = data_len;

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		/* let's check digest by hw */
		start_addr = rte_dpaa_mem_vtop(old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
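/*
 * Resulting compound frame for build_auth_only() (illustrative):
 *
 *	sg[0] (out): digest buffer, ses->digest_length bytes
 *	sg[1] (in):  extension entry -> sg[2..]
 *	sg[2]:       optional IV (SNOW f9 / ZUC EIA3 only)
 *	sg[3]:       data to authenticate (offset/length from the op)
 *	sg[4]:       on decode only, the received digest, so the hardware
 *	             performs the ICV comparison itself
 *
 * The last input entry carries final = 1 so CAAM knows where the frame ends.
 */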
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);
	int data_len, data_offset;

	data_len = sym->cipher.data.length;
	data_offset = sym->cipher.data.offset;

	if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
			return NULL;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	ctx = dpaa_sec_alloc_ctx(ses, 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + data_offset);
	sg->length = data_len + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = data_len + ses->iv.length;
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + data_offset);
	sg->length = data_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
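/*
 * Resulting compound frame for build_cipher_only() (illustrative):
 *
 *	sg[0] (out): dst buffer at data_offset, data_len + IV length
 *	sg[1] (in):  extension entry -> sg[2..]
 *	sg[2]:       session IV, placed first so CAAM consumes it before
 *	             the payload
 *	sg[3]:       src payload at data_offset, data_len, final = 1
 *
 * When sym->m_dst is NULL the operation runs in place: dst_start_addr
 * simply aliases src_start_addr.
 */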
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->digest_length;
	else
		out_sg->length = sym->aead.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
				rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
				rte_dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
			ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset);
	sg->length = sym->aead.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
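/*
 * Resulting compound frames for the GCM builders (illustrative):
 *
 *	encode in:  IV | AAD (optional) | plaintext
 *	encode out: ciphertext | digest
 *	decode in:  IV | AAD (optional) | ciphertext | digest
 *	decode out: plaintext
 *
 * On decode the received digest is first copied into ctx->digest so that a
 * DMA-able address can be handed to CAAM for in-hardware ICV verification.
 */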
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 7);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
			ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses, 2);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint32_t in_len = 0, out_len = 0;

	if (sym->m_dst)
		mbuf = sym->m_dst;
	else
		mbuf = sym->m_src;

	req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
	if (mbuf->nb_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses, req_segs);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->offset = 0;

	/* Successive segs */
	while (mbuf->next) {
		sg->length = mbuf->data_len;
		out_len += sg->length;
		mbuf = mbuf->next;
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->offset = 0;
	}
	sg->length = mbuf->buf_len - mbuf->data_off;
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_len = mbuf->data_len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
	sg->length = mbuf->data_len;
	sg->offset = 0;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
		sg->length = mbuf->data_len;
		sg->offset = 0;
		in_len += sg->length;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

	return cf;
}
#endif
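/*
 * Note on the protocol (lookaside IPsec/PDCP) path, illustrative: unlike the
 * raw crypto builders, build_proto()/build_proto_sg() hand CAAM the whole
 * packet and size the output by the buffer rather than the payload, because
 * the SEC engine itself adds or strips the protocol headers:
 *
 *	in:  sg[1] -> m_src payload, pkt_len bytes
 *	out: sg[0] -> dst buffer, buf_len - data_off bytes available
 *
 * The L4 packet type is cleared since the inner headers are about to change.
 */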
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}
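			/*
			 * Worked example (illustrative): for a chained
			 * cipher+auth op that authenticates an 8-byte header
			 * and a 4-byte trailer around the encrypted region,
			 * the DPOVRD override word becomes
			 *
			 *	fd->cmd = 0x80000000	// bit 31: override
			 *	        | (4 << 16)	// auth_tail_len
			 *	        | 8;		// auth_hdr_len
			 *
			 * matching the auth-only-length 0 placeholder left in
			 * the shared descriptor by cnstr_shdsc_authenc().
			 */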
#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	rte_mempool_free(qp->ctx_pool);
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;
	char str[20];

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
			dev->data->dev_id, qp_id);
	if (!qp->ctx_pool) {
		qp->ctx_pool = rte_mempool_create((const char *)str,
							CTX_POOL_NUM_BUFS,
							CTX_POOL_BUF_SIZE,
							CTX_POOL_CACHE_SIZE, 0,
							NULL, NULL, NULL, NULL,
							SOCKET_ID_ANY, 0);
		if (!qp->ctx_pool) {
			DPAA_SEC_ERR("%s create failed", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
				dev->data->dev_id, qp_id);
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
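/*
 * Application-side usage sketch (illustrative): each qp gets its own context
 * mempool, so setup is done per queue pair after device configuration:
 *
 *	struct rte_cryptodev_qp_conf conf = { .nb_descriptors = 2048 };
 *	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &conf,
 *					     rte_socket_id());
 *
 * Note that the conf and socket arguments are accepted but unused here
 * (__rte_unused): pool sizing is fixed by CTX_POOL_NUM_BUFS and
 * CTX_POOL_BUF_SIZE instead.
 */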
/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_CIPHER;
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     xform->cipher.algo);
		return -ENOTSUP;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
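/*
 * Usage sketch (illustrative): a session reaching dpaa_sec_cipher_init()
 * comes from a cipher-only transform such as
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *
 * where IV_OFFSET (an application-chosen value) points past the
 * rte_crypto_op so the IV travels with each operation; see the
 * rte_crypto_op_ctod_offset() calls in the builders above.
 */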
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->ctxt = DPAA_SEC_AUTH;
	session->auth_alg = xform->auth.algo;
	session->auth_key.length = xform->auth.key.length;
	if (xform->auth.key.length) {
		session->auth_key.data =
				rte_zmalloc(NULL, xform->auth.key.length,
					    RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		memcpy(session->auth_key.data, xform->auth.key.data,
		       xform->auth.key.length);
	}
	session->digest_length = xform->auth.digest_length;
	if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
	}

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HASH;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
		session->auth_key.algmode = OP_ALG_AAI_F9;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     xform->auth.algo);
		return -ENOTSUP;
	}

	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
		    struct rte_crypto_sym_xform *xform,
		    dpaa_sec_session *session)
{
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;

	session->ctxt = DPAA_SEC_CIPHER_HASH;
	if (session->auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	session->digest_length = auth_xform->digest_length;
	session->auth_alg = auth_xform->algo;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_MD5;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_ALG_ALGSEL_AES;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
			     auth_xform->algo);
		return -ENOTSUP;
	}

	session->cipher_alg = cipher_xform->algo;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_ALG_ALGSEL_AES;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		return -ENOTSUP;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->ctxt = DPAA_SEC_AEAD;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);

	switch (session->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		session->aead_key.alg = OP_ALG_ALGSEL_AES;
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
		return -ENOTSUP;
	}

	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		if (&qi->inq[i] == fq) {
			if (qman_retire_fq(fq, NULL) != 0)
				DPAA_SEC_DEBUG("Queue is not retired\n");
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}

static inline void
free_session_data(dpaa_sec_session *s)
{
	if (is_aead(s))
		rte_free(s->aead_key.data);
	else {
		rte_free(s->auth_key.data);
		rte_free(s->cipher_key.data);
	}
	memset(s, 0, sizeof(dpaa_sec_session));
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}
static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
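/*
 * Usage sketch (illustrative): the ops above are reached through the generic
 * cryptodev session API of this DPDK generation, roughly:
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_priv_mp);
 *
 * session_init() lands in dpaa_sec_sym_session_configure(), which draws the
 * private dpaa_sec_session from sess_priv_mp and parses the xform chain.
 */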
2569 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2571 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2572 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2575 for (i = 0; i < MAX_DPAA_CORES; i++) {
2577 dpaa_sec_detach_rxq(qi, s->inq[i]);
2581 free_session_data(s);
2582 rte_mempool_put(sess_mp, (void *)s);
2585 /** Clear the memory of session so it doesn't leave key material behind */
2587 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2588 struct rte_cryptodev_sym_session *sess)
2590 PMD_INIT_FUNC_TRACE();
2591 uint8_t index = dev->driver_id;
2592 void *sess_priv = get_sym_session_private_data(sess, index);
2593 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2596 free_session_memory(dev, s);
2597 set_sym_session_private_data(sess, index, NULL);
#ifdef RTE_LIB_SECURITY
static int
dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			struct rte_security_ipsec_xform *ipsec_xform,
			dpaa_sec_session *session)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		switch (session->digest_length) {
		case 8:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
			break;
		case 12:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
			break;
		case 16:
			session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
				     session->digest_length);
			return -EINVAL;
		}
		if (session->dir == DIR_ENC) {
			memcpy(session->encap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		} else {
			memcpy(session->decap_pdb.gcm.salt,
				(uint8_t *)&(ipsec_xform->salt), 4);
		}
		session->aead_key.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	default:
		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
			     aead_xform->algo);
		return -ENOTSUP;
	}
	return 0;
}
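/*
 * In ESP with AES-GCM the 4-byte salt copied above is prepended to the
 * 8-byte per-packet IV to form the 12-byte GCM nonce, and digest_length
 * selects the ICV size through the OP_PCL_IPSEC_AES_GCM{8,12,16} descriptor
 * options. A sketch of the AEAD xform this path expects (field values are
 * illustrative; IV_OFF is an application-chosen offset, not defined here):
 *
 *	struct rte_crypto_sym_xform aead = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.digest_length = 16,
 *			.iv = { .offset = IV_OFF, .length = 8 },
 *		},
 *	};
 */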
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
				"+++Using sha256-hmac truncated len is non-standard, "
				"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		session->auth_key.algmode = OP_ALG_AAI_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
		session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     session->auth_alg);
		return -ENOTSUP;
	}
	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
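/*
 * Putting the two tables above together: a classic ESP transform such as
 * AES-CBC with HMAC-SHA1-96 maps to (OP_PCL_IPSEC_AES_CBC, OP_ALG_AAI_CBC)
 * plus (OP_PCL_IPSEC_HMAC_SHA1_96, OP_ALG_AAI_HMAC), which the shared
 * descriptor later hands to CAAM. The wireless ciphers (SNOW 3G, ZUC,
 * KASUMI) are rejected here because the IPsec protocol descriptor has no
 * mode for them; they are handled by the PDCP path below instead.
 */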
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_IPSEC;

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		session->dir = DIR_ENC;
	else
		session->dir = DIR_DEC;

	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
					ipsec_xform, session);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa_sec_ipsec_aead_init(aead_xform,
					ipsec_xform, session);
	} else {
		DPAA_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA_SEC_ERR("Failed to process xform");
		goto out;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
				&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
					sizeof(struct rte_ipv6_hdr);
		}

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;

			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	}

	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return ret;
}
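/*
 * A compressed, hedged sketch of the rte_security session config that
 * exercises the egress tunnel branch above (application-side; the SPI,
 * addresses and xform pointer are illustrative, not defined in this file):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead,
 *	};
 */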
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				     session->cipher_alg);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			ret = -EINVAL;
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				     session->auth_alg);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;

out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}
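/*
 * Sketch of a PDCP data-plane session config that reaches the function
 * above (application-side, illustrative values):
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x3,
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher,
 *	};
 *
 * Control-plane (RTE_SECURITY_PDCP_MODE_CONTROL) sessions additionally
 * carry an auth xform and, as checked above, must use 5- or 12-bit
 * sequence numbers.
 */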
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return 0;
}
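/*
 * The function above is wired into rte_security as the session_create hook
 * (see dpaa_sec_security_ops below). A hedged application-side sketch using
 * the 4-argument create of this rte_security generation (sess_mp and
 * priv_mp are placeholder mempools):
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
 */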
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}
static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	uint8_t index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
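/*
 * Why the shift-and-mask above works: DQRR entries live in a ring of 16
 * 64-byte slots, so bits [9:6] of an entry's address are its slot index.
 * For example, an entry at ring base + 0x1c0 yields (0x1c0 >> 6) & 0xf = 7;
 * slot 7 is then marked held (bit 7 of DPAA_PER_LCORE_DQRR_HELD) until the
 * application consumes the atomic event, which is what keeps the flow
 * pinned to one core.
 */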
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}
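/*
 * dpaa_sec_eventq_attach()/detach() are not part of the cryptodev ops
 * table; the DPAA event device calls them when a crypto queue pair is
 * bound to an event queue (the rte_event_crypto_adapter_queue_pair_add()
 * path). A hedged sketch of the rte_event a caller would pass in; ev_qid,
 * qp_id and ch_id are caller-side values, not defined here:
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_qid,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	dpaa_sec_eventq_attach(cdev, qp_id, ch_id, &ev);
 */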
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
		int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};
#ifdef RTE_LIB_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIB_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}
#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);