1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2021 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
30 #include <rte_hexdump.h>
36 /* RTA header files */
37 #include <desc/common.h>
38 #include <desc/algo.h>
39 #include <desc/ipsec.h>
40 #include <desc/pdcp.h>
41 #include <desc/sdap.h>
43 #include <rte_dpaa_bus.h>
45 #include <dpaa_sec_event.h>
46 #include <dpaa_sec_log.h>
47 #include <dpaax_iova_table.h>
/* devargs key used to select the datapath error-dump verbosity. */
49 #define DRIVER_DUMP_MODE "drv_dump_mode"
51 /* DPAA_SEC_DP_DUMP levels */
/* NOTE(review): the enum members are elided in this listing; values such as
 * DPAA_SEC_DP_NO_DUMP and DPAA_SEC_DP_ERR_DUMP are referenced later in the
 * file — confirm their definitions against the full source.
 */
52 enum dpaa_sec_dump_levels {
/* Current dump level; defaults to dumping only on SEC errors. */
58 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
/* Cryptodev driver id — presumably assigned at PMD registration; verify. */
60 uint8_t dpaa_cryptodev_driver_id;
/*
 * Translate the hardware frame-descriptor status stored in @ctx into the
 * crypto op's completion status: fd_status == 0 maps to SUCCESS; any
 * non-zero status is logged and the op is marked ERROR.
 * NOTE(review): the return-type line and the else-branch line are elided
 * in this listing — confirm against the full source.
 */
63 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
65 if (!ctx->fd_status) {
66 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
68 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
69 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/*
 * Allocate a per-operation context from the session's per-lcore ctx_pool
 * and prepare its embedded scatter-gather job for @sg_count entries.
 * Returns the context, or (presumably) NULL on mempool exhaustion — the
 * error-return line is elided in this listing; verify.
 */
73 static inline struct dpaa_sec_op_ctx *
74 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
76 struct dpaa_sec_op_ctx *ctx;
/* One ctx_pool per qp; the qp is picked by lcore id to avoid contention. */
79 retval = rte_mempool_get(
80 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
83 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
87 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
88 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
89 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
90 * each packet, memset is costlier than dcbz_64().
/* Step of 4 because each dcbz_64() zeroes four 16-byte SG entries. */
92 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
93 dcbz_64(&ctx->job.sg[i]);
95 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
/* Cache virt-to-IOVA offset so later address conversions are a subtraction. */
96 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
/*
 * Enqueue-rejection (ERN) callback for SEC frame queues: logs the FQ id,
 * rejection code and sequence number from the message-ring entry.
 */
102 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
104 const struct qm_mr_entry *msg)
106 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
107 fq->fqid, msg->ern.rc, msg->ern.seqnum);
110 /* initialize the queue with dest chan as caam chan so that
111 * all the packets in this queue could be dispatched into caam
/*
 * Program the ingress FQ: context_a carries the shared-descriptor IOVA,
 * context_b the output FQ id, and the destination channel is the CAAM
 * channel so enqueued frames are delivered to the SEC engine.
 * NOTE(review): parameter list and return statement are partly elided
 * (fqid_out is used but its declaration line is not visible) — verify.
 */
114 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
117 struct qm_mcc_initfq fq_opts;
121 /* Clear FQ options */
122 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
124 flags = QMAN_INITFQ_FLAG_SCHED;
125 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
126 QM_INITFQ_WE_CONTEXTB;
/* context_a = shared descriptor address consumed by CAAM. */
128 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
/* context_b = FQ where CAAM will place the crypto result. */
129 fq_opts.fqd.context_b = fqid_out;
130 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
131 fq_opts.fqd.dest.wq = 0;
/* Report enqueue rejections through the common SEC ERN handler. */
133 fq_in->cb.ern = ern_sec_fq_handler;
135 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
137 ret = qman_init_fq(fq_in, flags, &fq_opts);
138 if (unlikely(ret != 0))
139 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
144 /* something is put into in_fq and caam put the crypto result into out_fq */
/*
 * DQRR callback for the SEC output FQ (event/poll-mode receive path).
 * Recovers the op context from the frame descriptor, records fd_status,
 * fixes up the last mbuf segment length for protocol-offload (security
 * session) frames, stashes the op in the per-lcore burst array, and sets
 * the op completion status. Defers when the per-lcore burst is full.
 * NOTE(review): several lines (fd assignment from dqrr, mbuf walk advance,
 * closing braces) are elided in this listing — verify the loop body.
 */
145 static enum qman_cb_dqrr_result
146 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
147 struct qman_fq *fq __always_unused,
148 const struct qm_dqrr_entry *dqrr)
150 const struct qm_fd *fd;
151 struct dpaa_sec_job *job;
152 struct dpaa_sec_op_ctx *ctx;
/* Back-pressure: defer once this lcore already holds a full burst. */
154 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
155 return qman_cb_dqrr_defer;
157 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
158 return qman_cb_dqrr_consume;
161 /* sg is embedded in an op ctx,
162 * sg[0] is for output
165 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
/* The job is embedded in the op ctx, so container_of recovers the ctx. */
167 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
168 ctx->fd_status = fd->status;
169 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
170 struct qm_sg_entry *sg_out;
172 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
173 ctx->op->sym->m_src : ctx->op->sym->m_dst;
175 sg_out = &job->sg[0];
176 hw_sg_to_cpu(sg_out);
177 len = sg_out->length;
/* Walk to the last segment; the residue becomes its data_len. */
179 while (mbuf->next != NULL) {
180 len -= mbuf->data_len;
183 mbuf->data_len = len;
185 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
186 dpaa_sec_op_ending(ctx);
188 return qman_cb_dqrr_consume;
191 /* caam result is put into this queue */
/*
 * Create and initialize the output FQ that receives CAAM results.
 * The FQ id is dynamically allocated, the queue is locked to this portal
 * and not enqueuable by software; dqrr/ern callbacks are installed before
 * qman_init_fq().
 * NOTE(review): error-return lines after the two failure logs are elided
 * in this listing — verify the cleanup/return paths.
 */
193 dpaa_sec_init_tx(struct qman_fq *fq)
196 struct qm_mcc_initfq opts;
199 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
200 QMAN_FQ_FLAG_DYNAMIC_FQID;
202 ret = qman_create_fq(0, flags, fq);
204 DPAA_SEC_ERR("qman_create_fq failed");
208 memset(&opts, 0, sizeof(opts));
209 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
210 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
212 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
214 fq->cb.dqrr = dqrr_out_fq_cb_rx;
215 fq->cb.ern = ern_sec_fq_handler;
217 ret = qman_init_fq(fq, 0, &opts);
219 DPAA_SEC_ERR("unable to init caam source fq!");
/* True when the session is pure AEAD: no separate cipher or auth alg set. */
226 static inline int is_aead(dpaa_sec_session *ses)
228 return ((ses->cipher_alg == 0) &&
229 (ses->auth_alg == 0) &&
230 (ses->aead_alg != 0));
/* True when the session direction is encrypt/encapsulate. */
233 static inline int is_encode(dpaa_sec_session *ses)
235 return ses->dir == DIR_ENC;
/* True when the session direction is decrypt/decapsulate. */
238 static inline int is_decode(dpaa_sec_session *ses)
240 return ses->dir == DIR_DEC;
243 #ifdef RTE_LIB_SECURITY
/*
 * Build the PDCP shared descriptor for @ses into ses->cdb using the RTA
 * cnstr_shdsc_pdcp_* constructors. Cipher/auth key material is passed
 * immediate by default; when the descriptor would not fit (per the
 * rta_inline_*query() helpers) keys are converted to by-reference
 * (RTA_DATA_PTR) with their IOVA. Selects the constructor by PDCP domain
 * (control / short-MAC / user plane), SDAP mode and direction.
 * Returns the shared-descriptor length, negative on constructor failure.
 * NOTE(review): many argument lines, condition lines (e.g. the auth-key
 * guard before line 265) and closing braces are elided in this listing —
 * verify against the full source before changing anything here.
 */
245 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
247 struct alginfo authdata = {0}, cipherdata = {0};
248 struct sec_cdb *cdb = &ses->cdb;
249 struct alginfo *p_authdata = NULL;
250 int32_t shared_desc_len = 0;
/* 'swap' (used below) presumably depends on endianness — lines elided. */
251 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* Cipher key described immediate (in-descriptor) by default. */
257 cipherdata.key = (size_t)ses->cipher_key.data;
258 cipherdata.keylen = ses->cipher_key.length;
259 cipherdata.key_enc_flags = 0;
260 cipherdata.key_type = RTA_DATA_IMM;
261 cipherdata.algtype = ses->cipher_key.alg;
262 cipherdata.algmode = ses->cipher_key.algmode;
265 authdata.key = (size_t)ses->auth_key.data;
266 authdata.keylen = ses->auth_key.length;
267 authdata.key_enc_flags = 0;
268 authdata.key_type = RTA_DATA_IMM;
269 authdata.algtype = ses->auth_key.alg;
270 authdata.algmode = ses->auth_key.algmode;
272 p_authdata = &authdata;
/* SDAP: query how many keys must be referenced instead of inlined. */
275 if (ses->pdcp.sdap_enabled) {
276 int nb_keys_to_inline =
277 rta_inline_pdcp_sdap_query(authdata.algtype,
281 if (nb_keys_to_inline >= 1) {
282 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
283 (size_t)cipherdata.key);
284 cipherdata.key_type = RTA_DATA_PTR;
286 if (nb_keys_to_inline >= 2) {
287 authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
288 (size_t)authdata.key);
289 authdata.key_type = RTA_DATA_PTR;
/* Non-SDAP: same inline-vs-pointer decision for the cipher key only. */
292 if (rta_inline_pdcp_query(authdata.algtype,
295 ses->pdcp.hfn_ovd)) {
296 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
297 (size_t)cipherdata.key);
298 cipherdata.key_type = RTA_DATA_PTR;
/* Control-plane PDCP: direction selects encap vs decap constructor. */
302 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
303 if (ses->dir == DIR_ENC)
304 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
305 cdb->sh_desc, 1, swap,
310 ses->pdcp.hfn_threshold,
311 &cipherdata, &authdata);
312 else if (ses->dir == DIR_DEC)
313 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
314 cdb->sh_desc, 1, swap,
319 ses->pdcp.hfn_threshold,
320 &cipherdata, &authdata);
321 } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
322 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
/* User-plane PDCP: SDAP and plain variants, per direction. */
325 if (ses->dir == DIR_ENC) {
326 if (ses->pdcp.sdap_enabled)
328 cnstr_shdsc_pdcp_sdap_u_plane_encap(
329 cdb->sh_desc, 1, swap,
334 ses->pdcp.hfn_threshold,
335 &cipherdata, p_authdata);
338 cnstr_shdsc_pdcp_u_plane_encap(
339 cdb->sh_desc, 1, swap,
344 ses->pdcp.hfn_threshold,
345 &cipherdata, p_authdata);
346 } else if (ses->dir == DIR_DEC) {
347 if (ses->pdcp.sdap_enabled)
349 cnstr_shdsc_pdcp_sdap_u_plane_decap(
350 cdb->sh_desc, 1, swap,
355 ses->pdcp.hfn_threshold,
356 &cipherdata, p_authdata);
359 cnstr_shdsc_pdcp_u_plane_decap(
360 cdb->sh_desc, 1, swap,
365 ses->pdcp.hfn_threshold,
366 &cipherdata, p_authdata);
369 return shared_desc_len;
372 /* prepare ipsec proto command block of the session */
/*
 * Build the IPsec (new-mode) shared descriptor for @ses into ses->cdb.
 * Key lengths are first written into sh_desc[0..1] for rta_inline_query(),
 * whose bitmask result in sh_desc[2] decides per-key whether the key is
 * inlined (RTA_DATA_IMM) or referenced by IOVA (RTA_DATA_PTR). Direction
 * selects the encap (with the session's IPv4 header template) or decap
 * constructor. Returns the shared-descriptor length, negative on error.
 * NOTE(review): 'err' and 'swap' declarations, several argument lines and
 * the else branches of the inline/pointer decisions are elided here —
 * verify against the full source.
 */
374 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
376 struct alginfo cipherdata = {0}, authdata = {0};
377 struct sec_cdb *cdb = &ses->cdb;
378 int32_t shared_desc_len = 0;
380 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
386 cipherdata.key = (size_t)ses->cipher_key.data;
387 cipherdata.keylen = ses->cipher_key.length;
388 cipherdata.key_enc_flags = 0;
389 cipherdata.key_type = RTA_DATA_IMM;
390 cipherdata.algtype = ses->cipher_key.alg;
391 cipherdata.algmode = ses->cipher_key.algmode;
/* Auth data only populated for algorithms that actually carry a key. */
393 if (ses->auth_key.length) {
394 authdata.key = (size_t)ses->auth_key.data;
395 authdata.keylen = ses->auth_key.length;
396 authdata.key_enc_flags = 0;
397 authdata.key_type = RTA_DATA_IMM;
398 authdata.algtype = ses->auth_key.alg;
399 authdata.algmode = ses->auth_key.algmode;
/* sh_desc[0..1] temporarily hold key lengths for the inline query. */
402 cdb->sh_desc[0] = cipherdata.keylen;
403 cdb->sh_desc[1] = authdata.keylen;
404 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
406 (unsigned int *)cdb->sh_desc,
407 &cdb->sh_desc[2], 2);
410 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
/* Bit 0 of the query result: cipher key fits inline; else use IOVA. */
413 if (cdb->sh_desc[2] & 1)
414 cipherdata.key_type = RTA_DATA_IMM;
416 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
417 (void *)(size_t)cipherdata.key);
418 cipherdata.key_type = RTA_DATA_PTR;
/* Bit 1: same decision for the auth key. */
420 if (cdb->sh_desc[2] & (1<<1))
421 authdata.key_type = RTA_DATA_IMM;
423 authdata.key = (size_t)rte_dpaa_mem_vtop(
424 (void *)(size_t)authdata.key);
425 authdata.key_type = RTA_DATA_PTR;
431 if (ses->dir == DIR_ENC) {
432 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
434 true, swap, SHR_SERIAL,
436 (uint8_t *)&ses->ip4_hdr,
437 &cipherdata, &authdata);
438 } else if (ses->dir == DIR_DEC) {
439 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
441 true, swap, SHR_SERIAL,
443 &cipherdata, &authdata);
445 return shared_desc_len;
448 /* prepare command block of the session */
/*
 * Top-level shared-descriptor builder: zeroes the session CDB, then
 * dispatches on the session context type — IPsec / PDCP proto offload,
 * cipher-only, auth-only, AEAD (GCM), and cipher+auth — calling the
 * matching RTA cnstr_shdsc_* constructor. On success the descriptor
 * length is written into the (big-endian) CDB header words; a negative
 * constructor result is returned unchanged.
 * NOTE(review): the switch statement itself, 'err'/'swap' declarations,
 * several case labels and constructor argument lines are elided in this
 * listing — verify the dispatch structure against the full source.
 */
450 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
452 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
453 int32_t shared_desc_len = 0;
454 struct sec_cdb *cdb = &ses->cdb;
456 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
462 memset(cdb, 0, sizeof(struct sec_cdb));
465 #ifdef RTE_LIB_SECURITY
/* Protocol-offload sessions delegate to the dedicated builders. */
467 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
470 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
473 case DPAA_SEC_CIPHER:
474 alginfo_c.key = (size_t)ses->cipher_key.data;
475 alginfo_c.keylen = ses->cipher_key.length;
476 alginfo_c.key_enc_flags = 0;
477 alginfo_c.key_type = RTA_DATA_IMM;
478 alginfo_c.algtype = ses->cipher_key.alg;
479 alginfo_c.algmode = ses->cipher_key.algmode;
481 switch (ses->cipher_alg) {
482 case RTE_CRYPTO_CIPHER_AES_CBC:
483 case RTE_CRYPTO_CIPHER_3DES_CBC:
484 case RTE_CRYPTO_CIPHER_DES_CBC:
485 case RTE_CRYPTO_CIPHER_AES_CTR:
486 case RTE_CRYPTO_CIPHER_3DES_CTR:
487 shared_desc_len = cnstr_shdsc_blkcipher(
489 swap, SHR_NEVER, &alginfo_c,
493 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
494 shared_desc_len = cnstr_shdsc_snow_f8(
495 cdb->sh_desc, true, swap,
499 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
500 shared_desc_len = cnstr_shdsc_zuce(
501 cdb->sh_desc, true, swap,
506 DPAA_SEC_ERR("unsupported cipher alg %d",
/* Auth-only context: hash, HMAC and stream-cipher integrity algos. */
512 alginfo_a.key = (size_t)ses->auth_key.data;
513 alginfo_a.keylen = ses->auth_key.length;
514 alginfo_a.key_enc_flags = 0;
515 alginfo_a.key_type = RTA_DATA_IMM;
516 alginfo_a.algtype = ses->auth_key.alg;
517 alginfo_a.algmode = ses->auth_key.algmode;
518 switch (ses->auth_alg) {
519 case RTE_CRYPTO_AUTH_MD5:
520 case RTE_CRYPTO_AUTH_SHA1:
521 case RTE_CRYPTO_AUTH_SHA224:
522 case RTE_CRYPTO_AUTH_SHA256:
523 case RTE_CRYPTO_AUTH_SHA384:
524 case RTE_CRYPTO_AUTH_SHA512:
525 shared_desc_len = cnstr_shdsc_hash(
527 swap, SHR_NEVER, &alginfo_a,
531 case RTE_CRYPTO_AUTH_MD5_HMAC:
532 case RTE_CRYPTO_AUTH_SHA1_HMAC:
533 case RTE_CRYPTO_AUTH_SHA224_HMAC:
534 case RTE_CRYPTO_AUTH_SHA256_HMAC:
535 case RTE_CRYPTO_AUTH_SHA384_HMAC:
536 case RTE_CRYPTO_AUTH_SHA512_HMAC:
537 shared_desc_len = cnstr_shdsc_hmac(
539 swap, SHR_NEVER, &alginfo_a,
543 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
544 shared_desc_len = cnstr_shdsc_snow_f9(
545 cdb->sh_desc, true, swap,
550 case RTE_CRYPTO_AUTH_ZUC_EIA3:
551 shared_desc_len = cnstr_shdsc_zuca(
552 cdb->sh_desc, true, swap,
557 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
558 case RTE_CRYPTO_AUTH_AES_CMAC:
559 shared_desc_len = cnstr_shdsc_aes_mac(
561 true, swap, SHR_NEVER,
567 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
/* AEAD context (GCM constructors below); reject unsupported algtypes. */
571 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
572 DPAA_SEC_ERR("not supported aead alg");
575 alginfo.key = (size_t)ses->aead_key.data;
576 alginfo.keylen = ses->aead_key.length;
577 alginfo.key_enc_flags = 0;
578 alginfo.key_type = RTA_DATA_IMM;
579 alginfo.algtype = ses->aead_key.alg;
580 alginfo.algmode = ses->aead_key.algmode;
582 if (ses->dir == DIR_ENC)
583 shared_desc_len = cnstr_shdsc_gcm_encap(
584 cdb->sh_desc, true, swap, SHR_NEVER,
589 shared_desc_len = cnstr_shdsc_gcm_decap(
590 cdb->sh_desc, true, swap, SHR_NEVER,
595 case DPAA_SEC_CIPHER_HASH:
596 alginfo_c.key = (size_t)ses->cipher_key.data;
597 alginfo_c.keylen = ses->cipher_key.length;
598 alginfo_c.key_enc_flags = 0;
599 alginfo_c.key_type = RTA_DATA_IMM;
600 alginfo_c.algtype = ses->cipher_key.alg;
601 alginfo_c.algmode = ses->cipher_key.algmode;
603 alginfo_a.key = (size_t)ses->auth_key.data;
604 alginfo_a.keylen = ses->auth_key.length;
605 alginfo_a.key_enc_flags = 0;
606 alginfo_a.key_type = RTA_DATA_IMM;
607 alginfo_a.algtype = ses->auth_key.alg;
608 alginfo_a.algmode = ses->auth_key.algmode;
/* Same inline-vs-pointer key decision as the IPsec path (sh_desc[2] bits). */
610 cdb->sh_desc[0] = alginfo_c.keylen;
611 cdb->sh_desc[1] = alginfo_a.keylen;
612 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
614 (unsigned int *)cdb->sh_desc,
615 &cdb->sh_desc[2], 2);
618 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
621 if (cdb->sh_desc[2] & 1)
622 alginfo_c.key_type = RTA_DATA_IMM;
624 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
625 (void *)(size_t)alginfo_c.key);
626 alginfo_c.key_type = RTA_DATA_PTR;
628 if (cdb->sh_desc[2] & (1<<1))
629 alginfo_a.key_type = RTA_DATA_IMM;
631 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
632 (void *)(size_t)alginfo_a.key);
633 alginfo_a.key_type = RTA_DATA_PTR;
638 /* Auth_only_len is set as 0 here and it will be
639 * overwritten in fd for each packet.
641 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
642 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
644 ses->digest_length, ses->dir);
646 case DPAA_SEC_HASH_CIPHER:
648 DPAA_SEC_ERR("error: Unsupported session");
652 if (shared_desc_len < 0) {
653 DPAA_SEC_ERR("error in preparing command block");
654 return shared_desc_len;
/* Record the descriptor length and store the header words big-endian,
 * as the SEC block expects. */
657 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
658 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
659 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/*
 * Debug dump for a failed operation: prints session parameters (and PDCP
 * fields under RTE_LIBRTE_SECURITY), a byte-swapped copy of the shared
 * descriptor, the source/destination mbufs, both compound-frame SG
 * entries of the job, mempool availability, and the qp's output-FQ
 * statistics. Called from the dequeue path when the dump level allows it.
 * NOTE(review): declarations (len, bufsize, m, intermediate variables),
 * the 'cdb' assignment, and several closing braces are elided in this
 * listing — verify before modifying.
 */
665 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
667 struct dpaa_sec_job *job = &ctx->job;
668 struct rte_crypto_op *op = ctx->op;
669 dpaa_sec_session *sess = NULL;
670 struct sec_cdb c_cdb, *cdb;
672 struct rte_crypto_sym_op *sym_op;
673 struct qm_sg_entry sg[2];
/* Resolve the session from either the sym-session or security-session. */
675 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
676 sess = (dpaa_sec_session *)
677 get_sym_session_private_data(
679 dpaa_cryptodev_driver_id);
680 #ifdef RTE_LIBRTE_SECURITY
681 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
682 sess = (dpaa_sec_session *)
683 get_sec_session_private_data(
684 op->sym->sec_session);
687 printf("session is NULL\n");
/* Work on a local copy so the live CDB is only byte-swapped here. */
692 rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
693 #ifdef RTE_LIBRTE_SECURITY
694 printf("\nsession protocol type = %d\n", sess->proto_alg);
696 printf("\n****************************************\n"
697 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
698 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
699 "\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
700 "\tCipher algmode:\t%d\n", sess->ctxt,
701 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
702 sess->cipher_alg, sess->auth_alg, sess->aead_alg,
703 (uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
704 sess->cipher_key.algmode);
705 rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
706 sess->cipher_key.length);
707 rte_hexdump(stdout, "auth key", sess->auth_key.data,
708 sess->auth_key.length);
709 printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
710 "\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
711 "\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
712 "\taead cipher text:\t%d\n",
713 (uint64_t)sess->auth_key.length, sess->auth_key.alg,
714 sess->auth_key.algmode,
715 sess->iv.length, sess->iv.offset,
716 sess->digest_length, sess->auth_only_len,
717 sess->auth_cipher_text);
718 #ifdef RTE_LIBRTE_SECURITY
719 printf("PDCP session params:\n"
720 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
721 "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
722 "\t%d\n\thfn:\t\t%d\n"
723 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
724 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
725 sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
726 sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
727 sess->pdcp.hfn_threshold);
/* CDB header words are stored big-endian; swap the copy back to read idlen. */
729 c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
730 c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
731 bufsize = c_cdb.sh_hdr.hi.field.idlen;
733 printf("cdb = %p\n\n", cdb);
734 printf("Descriptor size = %d\n", bufsize);
736 for (m = 0; m < bufsize; m++)
737 printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
743 printf("Source mbuf:\n");
744 rte_pktmbuf_dump(stdout, sym_op->m_src,
745 sym_op->m_src->data_len);
748 printf("Destination mbuf:\n");
749 rte_pktmbuf_dump(stdout, sym_op->m_dst,
750 sym_op->m_dst->data_len);
753 printf("Session address = %p\ncipher offset: %d, length: %d\n"
754 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
755 sym_op->session, sym_op->cipher.data.offset,
756 sym_op->cipher.data.length,
757 sym_op->auth.data.offset, sym_op->auth.data.length,
758 sym_op->aead.data.offset, sym_op->aead.data.length);
761 printf("******************************************************\n");
762 printf("ctx info:\n");
763 printf("job->sg[0] output info:\n");
/* Copy SG entries before printing; sg[1] is converted from HW byte order. */
764 memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
765 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
766 "\n\tbpid = %d\n\toffset = %d\n",
767 (uint64_t)sg[0].addr, sg[0].length, sg[0].final,
768 sg[0].extension, sg[0].bpid, sg[0].offset);
769 printf("\njob->sg[1] input info:\n");
770 memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
771 hw_sg_to_cpu(&sg[1]);
772 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
773 "\n\tbpid = %d\n\toffset = %d\n",
774 (uint64_t)sg[1].addr, sg[1].length, sg[1].final,
775 sg[1].extension, sg[1].bpid, sg[1].offset);
777 printf("\nctx pool addr = %p\n", ctx->ctx_pool);
779 printf("ctx pool available counts = %d\n",
780 rte_mempool_avail_count(ctx->ctx_pool));
782 printf("\nop pool addr = %p\n", op->mempool);
784 printf("op pool available counts = %d\n",
785 rte_mempool_avail_count(op->mempool));
787 printf("********************************************************\n");
788 printf("Queue data:\n");
789 printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
790 "\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
791 "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
792 qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
793 qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
794 qp->rx_errs, qp->tx_errs);
797 /* qp is lockless, should be accessed by only one thread */
/*
 * Poll-mode dequeue: pulls up to @nb_ops completed frames from the qp's
 * output FQ via a volatile dequeue (VDQCR). For requests below four
 * buffers QM_VDQCR_EXACT is used; larger requests ask for two fewer
 * frames because an inexact VDQCR may deliver up to two extra. Each
 * frame is mapped back to its op ctx, status is propagated, mbuf length
 * fixed up for security sessions, and the ctx returned to its mempool.
 * NOTE(review): 'fq' initialization, the op assignment into ops[], the
 * threshold condition before line 815 and the final return are elided in
 * this listing — verify against the full source.
 */
799 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
802 unsigned int pkts = 0;
803 int num_rx_bufs, ret;
804 struct qm_dqrr_entry *dq;
805 uint32_t vdqcr_flags = 0;
809 * Until request for four buffers, we provide exact number of buffers.
810 * Otherwise we do not set the QM_VDQCR_EXACT flag.
811 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
812 * requested, so we request two less in this case.
815 vdqcr_flags = QM_VDQCR_EXACT;
816 num_rx_bufs = nb_ops;
818 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
819 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
821 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
826 const struct qm_fd *fd;
827 struct dpaa_sec_job *job;
828 struct dpaa_sec_op_ctx *ctx;
829 struct rte_crypto_op *op;
831 dq = qman_dequeue(fq);
836 /* sg is embedded in an op ctx,
837 * sg[0] is for output
840 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
/* Recover the op ctx that embeds this job. */
842 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
843 ctx->fd_status = fd->status;
845 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
846 struct qm_sg_entry *sg_out;
848 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
849 op->sym->m_src : op->sym->m_dst;
851 sg_out = &job->sg[0];
852 hw_sg_to_cpu(sg_out);
853 len = sg_out->length;
/* Walk to the last segment; the residue becomes its data_len. */
855 while (mbuf->next != NULL) {
856 len -= mbuf->data_len;
859 mbuf->data_len = len;
861 if (!ctx->fd_status) {
862 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* On error, optionally dump diagnostics per the configured dump level. */
864 if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
865 DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
867 if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
868 dpaa_sec_dump(ctx, qp);
870 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
874 /* report op status to sym->op and then free the ctx memory */
875 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
877 qman_dqrr_consume(fq, dq);
878 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
/*
 * Build the compound frame for an auth-only operation on a multi-segment
 * mbuf. Output SG (sg[0]) is the digest buffer; input SG (sg[1]) is an
 * extension chain: optional IV (SNOW f9 / ZUC EIA converted in place),
 * the mbuf data segments, and — for decode — a saved copy of the
 * received digest so the hardware can verify it.
 * For SNOW3G-UIA2/ZUC-EIA3 length/offset arrive in bits and must be
 * byte-aligned; they are converted to bytes here (>> 3).
 * NOTE(review): several lines (extra_segs computation, NULL checks,
 * sg advance statements, final cpu_to_hw_sg calls and the return) are
 * elided in this listing — verify against the full source.
 */
883 static inline struct dpaa_sec_job *
884 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
886 struct rte_crypto_sym_op *sym = op->sym;
887 struct rte_mbuf *mbuf = sym->m_src;
888 struct dpaa_sec_job *cf;
889 struct dpaa_sec_op_ctx *ctx;
890 struct qm_sg_entry *sg, *out_sg, *in_sg;
891 phys_addr_t start_addr;
892 uint8_t *old_digest, extra_segs;
893 int data_len, data_offset;
895 data_len = sym->auth.data.length;
896 data_offset = sym->auth.data.offset;
/* Bit-oriented algos: reject non-byte-aligned ranges, convert bits->bytes. */
898 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
899 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
900 if ((data_len & 7) || (data_offset & 7)) {
901 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
905 data_len = data_len >> 3;
906 data_offset = data_offset >> 3;
914 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
915 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
919 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
/* Scratch digest buffer inside the ctx, used for decode verification. */
925 old_digest = ctx->digest;
929 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
930 out_sg->length = ses->digest_length;
931 cpu_to_hw_sg(out_sg);
935 /* need to extend the input to a compound frame */
936 in_sg->extension = 1;
938 in_sg->length = data_len;
939 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* Optional first input entry: per-op IV, converted for f9/EIA formats. */
944 if (ses->iv.length) {
947 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
950 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
951 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
953 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
954 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
957 sg->length = ses->iv.length;
959 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
960 in_sg->length += sg->length;
/* First data segment, honoring the byte offset into the mbuf. */
965 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
966 sg->offset = data_offset;
968 if (data_len <= (mbuf->data_len - data_offset)) {
969 sg->length = data_len;
971 sg->length = mbuf->data_len - data_offset;
973 /* remaining i/p segs */
974 while ((data_len = data_len - sg->length) &&
975 (mbuf = mbuf->next)) {
978 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
979 if (data_len > mbuf->data_len)
980 sg->length = mbuf->data_len;
982 sg->length = data_len;
986 if (is_decode(ses)) {
987 /* Digest verification case */
/* Copy the received digest aside so HW can compare against it. */
990 rte_memcpy(old_digest, sym->auth.digest.data,
992 start_addr = rte_dpaa_mem_vtop(old_digest);
993 qm_sg_entry_set64(sg, start_addr);
994 sg->length = ses->digest_length;
995 in_sg->length += ses->digest_length;
1005 * packet looks like:
1006 *		|<----data_len------->|
1007 *		|ip_header|ah_header|icv|payload|
/*
 * Build the compound frame for an auth-only operation on a contiguous
 * (single-segment) mbuf. Fixed 4-entry SG layout: sg[0] digest output,
 * sg[1] extension input pointing at sg[2..] = optional IV + data
 * (+ saved digest for decode verification). Bit-oriented algos
 * (SNOW3G-UIA2 / ZUC-EIA3) require byte-aligned ranges; converted here.
 * NOTE(review): sg initialization lines, cpu_to_hw_sg calls between
 * entries, and the return statement are elided in this listing — verify.
 */
1012 static inline struct dpaa_sec_job *
1013 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1015 struct rte_crypto_sym_op *sym = op->sym;
1016 struct rte_mbuf *mbuf = sym->m_src;
1017 struct dpaa_sec_job *cf;
1018 struct dpaa_sec_op_ctx *ctx;
1019 struct qm_sg_entry *sg, *in_sg;
1020 rte_iova_t start_addr;
1021 uint8_t *old_digest;
1022 int data_len, data_offset;
1024 data_len = sym->auth.data.length;
1025 data_offset = sym->auth.data.offset;
/* Bit-oriented algos: enforce byte alignment, convert bits->bytes. */
1027 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1028 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1029 if ((data_len & 7) || (data_offset & 7)) {
1030 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
1034 data_len = data_len >> 3;
1035 data_offset = data_offset >> 3;
1038 ctx = dpaa_sec_alloc_ctx(ses, 4);
1044 old_digest = ctx->digest;
1046 start_addr = rte_pktmbuf_iova(mbuf);
/* Output: the caller-provided digest buffer. */
1049 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1050 sg->length = ses->digest_length;
1055 /* need to extend the input to a compound frame */
1056 in_sg->extension = 1;
1058 in_sg->length = data_len;
1059 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* Optional IV entry, converted for SNOW f9 / ZUC EIA formats. */
1062 if (ses->iv.length) {
1065 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1068 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1069 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1071 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1072 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1075 sg->length = ses->iv.length;
1077 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
1078 in_sg->length += sg->length;
/* Data entry: the contiguous auth region within the mbuf. */
1083 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1084 sg->offset = data_offset;
1085 sg->length = data_len;
1087 if (is_decode(ses)) {
1088 /* Digest verification case */
1090 /* hash result or digest, save digest first */
1091 rte_memcpy(old_digest, sym->auth.digest.data,
1092 ses->digest_length);
1093 /* let's check digest by hw */
1094 start_addr = rte_dpaa_mem_vtop(old_digest);
1096 qm_sg_entry_set64(sg, start_addr);
1097 sg->length = ses->digest_length;
1098 in_sg->length += ses->digest_length;
1102 cpu_to_hw_sg(in_sg);
/*
 * Build the compound frame for a cipher-only operation on multi-segment
 * mbufs. Both sides are extension SG chains: output covers the cipher
 * region of the destination (or in-place source) mbuf chain; input is
 * IV followed by the source data segments. Bit-oriented ciphers
 * (SNOW3G-UEA2 / ZUC-EEA3) require byte-aligned len/offset (bits -> bytes).
 * NOTE(review): mbuf/m_dst selection, req_segs branch conditions, sg
 * advance and final-bit lines, and the return are elided — verify.
 */
1107 static inline struct dpaa_sec_job *
1108 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1110 struct rte_crypto_sym_op *sym = op->sym;
1111 struct dpaa_sec_job *cf;
1112 struct dpaa_sec_op_ctx *ctx;
1113 struct qm_sg_entry *sg, *out_sg, *in_sg;
1114 struct rte_mbuf *mbuf;
1116 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1118 int data_len, data_offset;
1120 data_len = sym->cipher.data.length;
1121 data_offset = sym->cipher.data.offset;
1123 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1124 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1125 if ((data_len & 7) || (data_offset & 7)) {
1126 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1130 data_len = data_len >> 3;
1131 data_offset = data_offset >> 3;
/* Out-of-place: account for both src and dst segment chains (+3 meta). */
1136 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1139 req_segs = mbuf->nb_segs * 2 + 3;
1141 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1142 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1147 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
/* Output side: extension chain over the destination cipher region. */
1155 out_sg = &cf->sg[0];
1156 out_sg->extension = 1;
1157 out_sg->length = data_len;
1158 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1159 cpu_to_hw_sg(out_sg);
1163 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1164 sg->length = mbuf->data_len - data_offset;
1165 sg->offset = data_offset;
1167 /* Successive segs */
1172 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1173 sg->length = mbuf->data_len;
/* Input side: extension chain = IV entry + source data segments. */
1182 in_sg->extension = 1;
1184 in_sg->length = data_len + ses->iv.length;
1187 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1188 cpu_to_hw_sg(in_sg);
1191 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1192 sg->length = ses->iv.length;
1197 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1198 sg->length = mbuf->data_len - data_offset;
1199 sg->offset = data_offset;
1201 /* Successive segs */
1206 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1207 sg->length = mbuf->data_len;
/*
 * Build the compound frame for a cipher-only operation on contiguous
 * mbufs (4-entry SG job). Output covers the destination cipher region
 * (in-place when m_dst is NULL); input is an extension of IV + source
 * data. Bit-oriented ciphers require byte-aligned len/offset.
 * NOTE(review): sg pointer setup, cpu_to_hw_sg/final-bit lines and the
 * return statement are elided in this listing — verify.
 */
1216 static inline struct dpaa_sec_job *
1217 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1219 struct rte_crypto_sym_op *sym = op->sym;
1220 struct dpaa_sec_job *cf;
1221 struct dpaa_sec_op_ctx *ctx;
1222 struct qm_sg_entry *sg;
1223 rte_iova_t src_start_addr, dst_start_addr;
1224 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1226 int data_len, data_offset;
1228 data_len = sym->cipher.data.length;
1229 data_offset = sym->cipher.data.offset;
1231 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1232 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1233 if ((data_len & 7) || (data_offset & 7)) {
1234 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1238 data_len = data_len >> 3;
1239 data_offset = data_offset >> 3;
1242 ctx = dpaa_sec_alloc_ctx(ses, 4);
1249 src_start_addr = rte_pktmbuf_iova(sym->m_src);
/* In-place operation when no separate destination mbuf is given. */
1252 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1254 dst_start_addr = src_start_addr;
/* Output: destination buffer at the cipher offset. */
1258 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1259 sg->length = data_len + ses->iv.length;
1265 /* need to extend the input to a compound frame */
1268 sg->length = data_len + ses->iv.length;
1269 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* Input chain: IV first, then the source cipher region. */
1273 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1274 sg->length = ses->iv.length;
1278 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1279 sg->length = data_len;
/*
 * Build the compound frame for an AEAD (GCM) operation on multi-segment
 * mbufs. Output chain covers the AEAD data region plus, for encode, the
 * digest; input chain is IV + optional AAD (auth_only_len) + data, plus
 * a saved copy of the received digest for decode verification.
 * NOTE(review): mbuf/m_dst selection, req_segs guard conditions, sg
 * advance/final-bit lines and the return are elided — verify.
 */
1286 static inline struct dpaa_sec_job *
1287 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1289 struct rte_crypto_sym_op *sym = op->sym;
1290 struct dpaa_sec_job *cf;
1291 struct dpaa_sec_op_ctx *ctx;
1292 struct qm_sg_entry *sg, *out_sg, *in_sg;
1293 struct rte_mbuf *mbuf;
1295 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* Out-of-place: both chains counted (+4 meta entries). */
1300 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1303 req_segs = mbuf->nb_segs * 2 + 4;
/* AAD, when present, needs one more input entry. */
1306 if (ses->auth_only_len)
1309 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1310 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1315 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1322 rte_prefetch0(cf->sg);
/* Output: data region; encode additionally appends the digest. */
1325 out_sg = &cf->sg[0];
1326 out_sg->extension = 1;
1328 out_sg->length = sym->aead.data.length + ses->digest_length;
1330 out_sg->length = sym->aead.data.length;
1332 /* output sg entries */
1334 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1335 cpu_to_hw_sg(out_sg);
1338 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1339 sg->length = mbuf->data_len - sym->aead.data.offset;
1340 sg->offset = sym->aead.data.offset;
1342 /* Successive segs */
1347 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1348 sg->length = mbuf->data_len;
/* Decode writes only plaintext: trim digest bytes from the last entry. */
1351 sg->length -= ses->digest_length;
1353 if (is_encode(ses)) {
1355 /* set auth output */
1357 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1358 sg->length = ses->digest_length;
/* Input: IV + AAD + data (+ received digest when decoding). */
1366 in_sg->extension = 1;
1369 in_sg->length = ses->iv.length + sym->aead.data.length
1370 + ses->auth_only_len;
1372 in_sg->length = ses->iv.length + sym->aead.data.length
1373 + ses->auth_only_len + ses->digest_length;
1375 /* input sg entries */
1377 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1378 cpu_to_hw_sg(in_sg);
1381 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1382 sg->length = ses->iv.length;
1385 /* 2nd seg auth only */
1386 if (ses->auth_only_len) {
1388 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1389 sg->length = ses->auth_only_len;
1395 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1396 sg->length = mbuf->data_len - sym->aead.data.offset;
1397 sg->offset = sym->aead.data.offset;
1399 /* Successive segs */
1404 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1405 sg->length = mbuf->data_len;
1409 if (is_decode(ses)) {
/* Save the received digest in the ctx so HW can verify it. */
1412 memcpy(ctx->digest, sym->aead.digest.data,
1413 ses->digest_length);
1414 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1415 sg->length = ses->digest_length;
/*
 * Build the compound frame for an AEAD (GCM) operation on contiguous
 * mbufs (fixed 7-entry SG job). Input chain (cf->sg[1], extension):
 * IV + optional AAD + data, plus the saved received digest for decode.
 * Output chain (cf->sg[0], extension): destination data region, plus the
 * digest buffer for encode. Operates in place when m_dst is NULL.
 * NOTE(review): sg pointer initialization/advance lines, the else/decode
 * branch markers, cpu_to_hw_sg calls between entries and the return are
 * elided in this listing — verify against the full source.
 */
1423 static inline struct dpaa_sec_job *
1424 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1426 struct rte_crypto_sym_op *sym = op->sym;
1427 struct dpaa_sec_job *cf;
1428 struct dpaa_sec_op_ctx *ctx;
1429 struct qm_sg_entry *sg;
1430 uint32_t length = 0;
1431 rte_iova_t src_start_addr, dst_start_addr;
1432 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1435 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
/* In-place when no destination mbuf is supplied. */
1438 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1440 dst_start_addr = src_start_addr;
1442 ctx = dpaa_sec_alloc_ctx(ses, 7);
1450 rte_prefetch0(cf->sg);
/* Input compound entry points at the first payload SG below. */
1452 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1453 if (is_encode(ses)) {
1454 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1455 sg->length = ses->iv.length;
1456 length += sg->length;
1460 if (ses->auth_only_len) {
1461 qm_sg_entry_set64(sg,
1462 rte_dpaa_mem_vtop(sym->aead.aad.data));
1463 sg->length = ses->auth_only_len;
1464 length += sg->length;
1468 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1469 sg->length = sym->aead.data.length;
1470 length += sg->length;
/* Decode path: same IV/AAD/data chain, plus the saved digest to verify. */
1474 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1475 sg->length = ses->iv.length;
1476 length += sg->length;
1480 if (ses->auth_only_len) {
1481 qm_sg_entry_set64(sg,
1482 rte_dpaa_mem_vtop(sym->aead.aad.data));
1483 sg->length = ses->auth_only_len;
1484 length += sg->length;
1488 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1489 sg->length = sym->aead.data.length;
1490 length += sg->length;
1493 memcpy(ctx->digest, sym->aead.digest.data,
1494 ses->digest_length);
1497 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1498 sg->length = ses->digest_length;
1499 length += sg->length;
1503 /* input compound frame */
1504 cf->sg[1].length = length;
1505 cf->sg[1].extension = 1;
1506 cf->sg[1].final = 1;
1507 cpu_to_hw_sg(&cf->sg[1]);
/* Output chain: destination data region (+ digest when encoding). */
1511 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1512 qm_sg_entry_set64(sg,
1513 dst_start_addr + sym->aead.data.offset);
1514 sg->length = sym->aead.data.length;
1515 length = sg->length;
1516 if (is_encode(ses)) {
1518 /* set auth output */
1520 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1521 sg->length = ses->digest_length;
1522 length += sg->length;
1527 /* output compound frame */
1528 cf->sg[0].length = length;
1529 cf->sg[0].extension = 1;
1530 cpu_to_hw_sg(&cf->sg[0]);
/*
 * Build a SEC compound-frame job for a chained cipher+auth operation
 * on scatter-gather (multi-segment) mbufs.
 *
 * One SG entry is emitted per mbuf segment for both the output chain
 * (auth range, plus digest on encode) and the input chain (IV first,
 * then the auth range; on decode the caller's digest is copied to
 * ctx->digest and appended for in-SEC verification).
 * Rejects chains longer than MAX_SG_ENTRIES.
 */
1535 static inline struct dpaa_sec_job *
1536 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1538 struct rte_crypto_sym_op *sym = op->sym;
1539 struct dpaa_sec_job *cf;
1540 struct dpaa_sec_op_ctx *ctx;
1541 struct qm_sg_entry *sg, *out_sg, *in_sg;
1542 struct rte_mbuf *mbuf;
1544 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* out-of-place: account for both dst and src segment chains */
1549 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1552 req_segs = mbuf->nb_segs * 2 + 4;
1555 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1556 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1561 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1568 rte_prefetch0(cf->sg);
/* output compound frame */
1571 out_sg = &cf->sg[0];
1572 out_sg->extension = 1;
1574 out_sg->length = sym->auth.data.length + ses->digest_length;
1576 out_sg->length = sym->auth.data.length;
1578 /* output sg entries */
1580 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1581 cpu_to_hw_sg(out_sg);
/* 1st seg: skip up to the auth data offset */
1584 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1585 sg->length = mbuf->data_len - sym->auth.data.offset;
1586 sg->offset = sym->auth.data.offset;
1588 /* Successive segs */
1593 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1594 sg->length = mbuf->data_len;
1597 sg->length -= ses->digest_length;
1599 if (is_encode(ses)) {
1601 /* set auth output */
1603 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1604 sg->length = ses->digest_length;
/* input compound frame: IV + auth range (+ digest on decode) */
1612 in_sg->extension = 1;
1615 in_sg->length = ses->iv.length + sym->auth.data.length;
1617 in_sg->length = ses->iv.length + sym->auth.data.length
1618 + ses->digest_length;
1620 /* input sg entries */
1622 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1623 cpu_to_hw_sg(in_sg);
1626 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1627 sg->length = ses->iv.length;
1632 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1633 sg->length = mbuf->data_len - sym->auth.data.offset;
1634 sg->offset = sym->auth.data.offset;
1636 /* Successive segs */
1641 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1642 sg->length = mbuf->data_len;
1646 sg->length -= ses->digest_length;
1647 if (is_decode(ses)) {
/* copy the expected digest so SEC can verify it in-line */
1650 memcpy(ctx->digest, sym->auth.digest.data,
1651 ses->digest_length);
1652 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1653 sg->length = ses->digest_length;
/*
 * Build a SEC compound-frame job for a chained cipher+auth operation
 * on contiguous (single-segment) mbufs.
 *
 * Input frame (cf->sg[1]): IV then the auth range; on decode the
 * caller's digest is copied to ctx->digest and appended for in-SEC
 * verification. Output frame (cf->sg[0]): the cipher range at dst,
 * plus the generated digest on encode. In-place when m_dst is absent.
 */
1661 static inline struct dpaa_sec_job *
1662 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1664 struct rte_crypto_sym_op *sym = op->sym;
1665 struct dpaa_sec_job *cf;
1666 struct dpaa_sec_op_ctx *ctx;
1667 struct qm_sg_entry *sg;
1668 rte_iova_t src_start_addr, dst_start_addr;
1669 uint32_t length = 0;
1670 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1673 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1675 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1677 dst_start_addr = src_start_addr;
/* 7 SG entries cover IV + data + digest on both frames */
1679 ctx = dpaa_sec_alloc_ctx(ses, 7);
1687 rte_prefetch0(cf->sg);
/* input compound frame: IV first */
1689 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1690 if (is_encode(ses)) {
1691 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1692 sg->length = ses->iv.length;
1693 length += sg->length;
1697 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1698 sg->length = sym->auth.data.length;
1699 length += sg->length;
1703 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1704 sg->length = ses->iv.length;
1705 length += sg->length;
1710 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1711 sg->length = sym->auth.data.length;
1712 length += sg->length;
/* decode: stash the caller's digest for in-SEC verification */
1715 memcpy(ctx->digest, sym->auth.digest.data,
1716 ses->digest_length);
1719 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1720 sg->length = ses->digest_length;
1721 length += sg->length;
1725 /* input compound frame */
1726 cf->sg[1].length = length;
1727 cf->sg[1].extension = 1;
1728 cf->sg[1].final = 1;
1729 cpu_to_hw_sg(&cf->sg[1]);
/* output compound frame: cipher range at dst */
1733 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1734 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1735 sg->length = sym->cipher.data.length;
1736 length = sg->length;
1737 if (is_encode(ses)) {
1739 /* set auth output */
1741 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1742 sg->length = ses->digest_length;
1743 length += sg->length;
1748 /* output compound frame */
1749 cf->sg[0].length = length;
1750 cf->sg[0].extension = 1;
1751 cpu_to_hw_sg(&cf->sg[0]);
1756 #ifdef RTE_LIB_SECURITY
/*
 * Build a SEC job for a lookaside-protocol (IPsec) operation on a
 * contiguous mbuf: input SG covers the whole packet (pkt_len), output
 * SG is given the full remaining buffer tailroom since the protocol
 * engine may grow the packet (encap headers/trailers).
 * L4 ptype is cleared because protocol processing rewrites the packet.
 * In-place when m_dst is absent.
 */
1757 static inline struct dpaa_sec_job *
1758 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1760 struct rte_crypto_sym_op *sym = op->sym;
1761 struct dpaa_sec_job *cf;
1762 struct dpaa_sec_op_ctx *ctx;
1763 struct qm_sg_entry *sg;
1764 phys_addr_t src_start_addr, dst_start_addr;
/* 2 SG entries: one input, one output */
1766 ctx = dpaa_sec_alloc_ctx(ses, 2);
1772 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1775 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1777 dst_start_addr = src_start_addr;
1781 qm_sg_entry_set64(sg, src_start_addr);
1782 sg->length = sym->m_src->pkt_len;
1786 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1789 qm_sg_entry_set64(sg, dst_start_addr);
/* output may grow: expose all space from data_off to buffer end */
1790 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
/*
 * Build a SEC job for a lookaside-protocol (IPsec) operation on
 * scatter-gather (multi-segment) mbufs: one SG entry per segment for
 * both input and output chains. The last output entry is sized to the
 * segment's full tailroom so the protocol engine can expand the
 * packet. Rejects chains longer than MAX_SG_ENTRIES.
 */
1796 static inline struct dpaa_sec_job *
1797 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1799 struct rte_crypto_sym_op *sym = op->sym;
1800 struct dpaa_sec_job *cf;
1801 struct dpaa_sec_op_ctx *ctx;
1802 struct qm_sg_entry *sg, *out_sg, *in_sg;
1803 struct rte_mbuf *mbuf;
1805 uint32_t in_len = 0, out_len = 0;
1812 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1813 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1814 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1819 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
/* output compound frame */
1825 out_sg = &cf->sg[0];
1826 out_sg->extension = 1;
1827 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1831 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1834 /* Successive segs */
1835 while (mbuf->next) {
1836 sg->length = mbuf->data_len;
1837 out_len += sg->length;
1841 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
/* last seg: full remaining tailroom, output may grow */
1844 sg->length = mbuf->buf_len - mbuf->data_off;
1845 out_len += sg->length;
1849 out_sg->length = out_len;
1850 cpu_to_hw_sg(out_sg);
/* input compound frame: the whole source packet */
1855 in_sg->extension = 1;
1857 in_len = mbuf->data_len;
1860 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1863 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1864 sg->length = mbuf->data_len;
1867 /* Successive segs */
1872 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1873 sg->length = mbuf->data_len;
1875 in_len += sg->length;
1881 in_sg->length = in_len;
1882 cpu_to_hw_sg(in_sg);
/* protocol processing rewrites the packet; invalidate L4 ptype */
1884 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
/*
 * Burst-enqueue crypto ops to the SEC hardware via QMan.
 *
 * For each op (in chunks of at most DPAA_SEC_BURST): resolve the
 * session, lazily attach it to this lcore's queue pair, pick the
 * contiguous or scatter-gather job builder based on mbuf layout and
 * session context, and fill a compound frame descriptor. DCA flags are
 * set for ops whose DQRR entry is still held (event-dequeue mode).
 * Returns the number of ops actually handed to QMan; tx counters are
 * updated on the queue pair.
 */
1891 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1894 /* Function to transmit the frames to given device and queuepair */
1896 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1897 uint16_t num_tx = 0;
1898 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1899 uint32_t frames_to_send;
1900 struct rte_crypto_op *op;
1901 struct dpaa_sec_job *cf;
1902 dpaa_sec_session *ses;
1903 uint16_t auth_hdr_len, auth_tail_len;
1904 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1905 struct qman_fq *inq[DPAA_SEC_BURST];
/* lazily affine a QMan portal to this lcore on first use */
1907 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1908 if (rte_dpaa_portal_init((void *)0)) {
1909 DPAA_SEC_ERR("Failure in affining portal");
1915 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1916 DPAA_SEC_BURST : nb_ops;
1917 for (loop = 0; loop < frames_to_send; loop++) {
/* op came from event dequeue with a held DQRR slot: consume via DCA */
1919 if (*dpaa_seqn(op->sym->m_src) != 0) {
1920 index = *dpaa_seqn(op->sym->m_src) - 1;
1921 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1922 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1923 flags[loop] = ((index & 0x0f) << 8);
1924 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1925 DPAA_PER_LCORE_DQRR_SIZE--;
1926 DPAA_PER_LCORE_DQRR_HELD &=
1931 switch (op->sess_type) {
1932 case RTE_CRYPTO_OP_WITH_SESSION:
1933 ses = (dpaa_sec_session *)
1934 get_sym_session_private_data(
1936 dpaa_cryptodev_driver_id);
1938 #ifdef RTE_LIB_SECURITY
1939 case RTE_CRYPTO_OP_SECURITY_SESSION:
1940 ses = (dpaa_sec_session *)
1941 get_sec_session_private_data(
1942 op->sym->sec_session);
1947 "sessionless crypto op not supported");
1948 frames_to_send = loop;
1954 DPAA_SEC_DP_ERR("session not available");
1955 frames_to_send = loop;
/* first op from this lcore on this session: attach it to the qp */
1960 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1961 if (dpaa_sec_attach_sess_q(qp, ses)) {
1962 frames_to_send = loop;
1966 } else if (unlikely(ses->qp[rte_lcore_id() %
1967 MAX_DPAA_CORES] != qp)) {
1968 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1970 ses->qp[rte_lcore_id() %
1971 MAX_DPAA_CORES], qp);
1972 frames_to_send = loop;
1977 auth_hdr_len = op->sym->auth.data.length -
1978 op->sym->cipher.data.length;
/* contiguous src/dst: use the linear (non-SG) builders */
1981 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1982 ((op->sym->m_dst == NULL) ||
1983 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1984 switch (ses->ctxt) {
1985 #ifdef RTE_LIB_SECURITY
1987 case DPAA_SEC_IPSEC:
1988 cf = build_proto(op, ses);
1992 cf = build_auth_only(op, ses);
1994 case DPAA_SEC_CIPHER:
1995 cf = build_cipher_only(op, ses);
1998 cf = build_cipher_auth_gcm(op, ses);
1999 auth_hdr_len = ses->auth_only_len;
2001 case DPAA_SEC_CIPHER_HASH:
2003 op->sym->cipher.data.offset
2004 - op->sym->auth.data.offset;
2006 op->sym->auth.data.length
2007 - op->sym->cipher.data.length
2009 cf = build_cipher_auth(op, ses);
2012 DPAA_SEC_DP_ERR("not supported ops");
2013 frames_to_send = loop;
/* multi-segment src or dst: use the scatter-gather builders */
2018 switch (ses->ctxt) {
2019 #ifdef RTE_LIB_SECURITY
2021 case DPAA_SEC_IPSEC:
2022 cf = build_proto_sg(op, ses);
2026 cf = build_auth_only_sg(op, ses);
2028 case DPAA_SEC_CIPHER:
2029 cf = build_cipher_only_sg(op, ses);
2032 cf = build_cipher_auth_gcm_sg(op, ses);
2033 auth_hdr_len = ses->auth_only_len;
2035 case DPAA_SEC_CIPHER_HASH:
2037 op->sym->cipher.data.offset
2038 - op->sym->auth.data.offset;
2040 op->sym->auth.data.length
2041 - op->sym->cipher.data.length
2043 cf = build_cipher_auth_sg(op, ses);
2046 DPAA_SEC_DP_ERR("not supported ops");
2047 frames_to_send = loop;
2052 if (unlikely(!cf)) {
2053 frames_to_send = loop;
/* fill the compound FD pointing at the job's SG table */
2059 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2060 fd->opaque_addr = 0;
2062 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2063 fd->_format1 = qm_fd_compound;
2064 fd->length29 = 2 * sizeof(struct qm_sg_entry);
2066 /* Auth_only_len is set as 0 in descriptor and it is
2067 * overwritten here in the fd.cmd which will update
2070 if (auth_hdr_len || auth_tail_len) {
2071 fd->cmd = 0x80000000;
2073 ((auth_tail_len << 16) | auth_hdr_len);
2076 #ifdef RTE_LIB_SECURITY
2077 /* In case of PDCP, per packet HFN is stored in
2078 * mbuf priv after sym_op.
2080 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2081 fd->cmd = 0x80000000 |
2082 *((uint32_t *)((uint8_t *)op +
2083 ses->pdcp.hfn_ovd_offset));
2084 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2085 *((uint32_t *)((uint8_t *)op +
2086 ses->pdcp.hfn_ovd_offset)),
/* push the prepared FDs; retry until QMan accepts them all */
2093 while (loop < frames_to_send) {
2094 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2095 &flags[loop], frames_to_send - loop);
2097 nb_ops -= frames_to_send;
2098 num_tx += frames_to_send;
2101 dpaa_qp->tx_pkts += num_tx;
2102 dpaa_qp->tx_errs += nb_ops - num_tx;
/*
 * Burst-dequeue completed crypto ops from the queue pair's SEC output
 * queue. Affines a QMan portal to this lcore on first use, then pulls
 * up to nb_ops finished ops via dpaa_sec_deq() and updates rx stats.
 * Returns the number of ops placed in @ops.
 */
2108 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2112 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2114 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2115 if (rte_dpaa_portal_init((void *)0)) {
2116 DPAA_SEC_ERR("Failure in affining portal");
2121 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2123 dpaa_qp->rx_pkts += num_rx;
2124 dpaa_qp->rx_errs += nb_ops - num_rx;
2126 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2131 /** Release queue pair */
/*
 * Validate qp_id against the device's limit, free the per-qp context
 * mempool and detach the qp from the device's queue_pairs array.
 */
2133 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2136 struct dpaa_sec_dev_private *internals;
2137 struct dpaa_sec_qp *qp = NULL;
2139 PMD_INIT_FUNC_TRACE();
2141 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2143 internals = dev->data->dev_private;
2144 if (qp_id >= internals->max_nb_queue_pairs) {
2145 DPAA_SEC_ERR("Max supported qpid %d",
2146 internals->max_nb_queue_pairs);
2150 qp = &internals->qps[qp_id];
/* rte_mempool_free(NULL) is a no-op, safe if setup never ran */
2151 rte_mempool_free(qp->ctx_pool);
2152 qp->internals = NULL;
2153 dev->data->queue_pairs[qp_id] = NULL;
2158 /** Setup a queue pair */
/*
 * Bind device qp_id to its pre-allocated dpaa_sec_qp and create the
 * per-qp context-descriptor mempool (named per dev/qp) on first
 * setup; the pool is reused if the qp is set up again.
 */
2160 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2161 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2162 __rte_unused int socket_id)
2164 struct dpaa_sec_dev_private *internals;
2165 struct dpaa_sec_qp *qp = NULL;
2168 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2170 internals = dev->data->dev_private;
2171 if (qp_id >= internals->max_nb_queue_pairs) {
2172 DPAA_SEC_ERR("Max supported qpid %d",
2173 internals->max_nb_queue_pairs);
2177 qp = &internals->qps[qp_id];
2178 qp->internals = internals;
2179 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2180 dev->data->dev_id, qp_id);
2181 if (!qp->ctx_pool) {
2182 qp->ctx_pool = rte_mempool_create((const char *)str,
2185 CTX_POOL_CACHE_SIZE, 0,
2186 NULL, NULL, NULL, NULL,
2188 if (!qp->ctx_pool) {
2189 DPAA_SEC_ERR("%s create failed\n", str);
2193 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2194 dev->data->dev_id, qp_id);
2195 dev->data->queue_pairs[qp_id] = qp;
2200 /** Returns the size of session structure */
/* Used by the cryptodev framework to size session mempool elements. */
2202 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2204 PMD_INIT_FUNC_TRACE();
2206 return sizeof(dpaa_sec_session);
/*
 * Initialize a cipher-only session from the cipher xform: copy the
 * key into freshly zmalloc'd storage, record IV geometry, map the
 * cryptodev algo to the SEC (RTA) algorithm/mode selectors, and set
 * the direction from the cipher op (encrypt/decrypt).
 */
2210 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2211 struct rte_crypto_sym_xform *xform,
2212 dpaa_sec_session *session)
2214 session->ctxt = DPAA_SEC_CIPHER;
2215 session->cipher_alg = xform->cipher.algo;
2216 session->iv.length = xform->cipher.iv.length;
2217 session->iv.offset = xform->cipher.iv.offset;
2218 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2219 RTE_CACHE_LINE_SIZE);
/* NULL cipher has zero-length key; only fail when a key was expected */
2220 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2221 DPAA_SEC_ERR("No Memory for cipher key");
2224 session->cipher_key.length = xform->cipher.key.length;
2226 memcpy(session->cipher_key.data, xform->cipher.key.data,
2227 xform->cipher.key.length);
2228 switch (xform->cipher.algo) {
2229 case RTE_CRYPTO_CIPHER_AES_CBC:
2230 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2231 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2233 case RTE_CRYPTO_CIPHER_DES_CBC:
2234 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2235 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2237 case RTE_CRYPTO_CIPHER_3DES_CBC:
2238 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2239 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2241 case RTE_CRYPTO_CIPHER_AES_CTR:
2242 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2243 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2245 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2246 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2248 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2249 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2252 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2253 xform->cipher.algo);
2256 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/*
 * Initialize an auth-only session from the auth xform: copy the key
 * (if any — plain hashes are keyless), record digest length and, for
 * algorithms needing one (SNOW3G/ZUC when no cipher is chained), the
 * auth IV; map the cryptodev algo to SEC hash/HMAC/MAC selectors and
 * set direction from generate/verify.
 */
2263 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2264 struct rte_crypto_sym_xform *xform,
2265 dpaa_sec_session *session)
2267 session->ctxt = DPAA_SEC_AUTH;
2268 session->auth_alg = xform->auth.algo;
2269 session->auth_key.length = xform->auth.key.length;
2270 if (xform->auth.key.length) {
2271 session->auth_key.data =
2272 rte_zmalloc(NULL, xform->auth.key.length,
2273 RTE_CACHE_LINE_SIZE);
2274 if (session->auth_key.data == NULL) {
2275 DPAA_SEC_ERR("No Memory for auth key");
2278 memcpy(session->auth_key.data, xform->auth.key.data,
2279 xform->auth.key.length);
2282 session->digest_length = xform->auth.digest_length;
/* auth IV only applies when no cipher provides one */
2283 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2284 session->iv.offset = xform->auth.iv.offset;
2285 session->iv.length = xform->auth.iv.length;
2288 switch (xform->auth.algo) {
2289 case RTE_CRYPTO_AUTH_SHA1:
2290 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2291 session->auth_key.algmode = OP_ALG_AAI_HASH;
2293 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2294 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2295 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2297 case RTE_CRYPTO_AUTH_MD5:
2298 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2299 session->auth_key.algmode = OP_ALG_AAI_HASH;
2301 case RTE_CRYPTO_AUTH_MD5_HMAC:
2302 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2303 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2305 case RTE_CRYPTO_AUTH_SHA224:
2306 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2307 session->auth_key.algmode = OP_ALG_AAI_HASH;
2309 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2310 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2311 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2313 case RTE_CRYPTO_AUTH_SHA256:
2314 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2315 session->auth_key.algmode = OP_ALG_AAI_HASH;
2317 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2318 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2319 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2321 case RTE_CRYPTO_AUTH_SHA384:
2322 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2323 session->auth_key.algmode = OP_ALG_AAI_HASH;
2325 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2326 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2327 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2329 case RTE_CRYPTO_AUTH_SHA512:
2330 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2331 session->auth_key.algmode = OP_ALG_AAI_HASH;
2333 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2334 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2335 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2337 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2338 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2339 session->auth_key.algmode = OP_ALG_AAI_F9;
2341 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2342 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2343 session->auth_key.algmode = OP_ALG_AAI_F9;
2345 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2346 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2347 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2349 case RTE_CRYPTO_AUTH_AES_CMAC:
2350 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2351 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2354 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2359 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
/*
 * Initialize a chained cipher+auth session. Which xform in the chain
 * is the cipher vs. the auth is chosen by auth_cipher_text (set by
 * the caller from the xform order). Copies both keys, records IV and
 * digest geometry, and maps both algos to SEC selectors. Direction
 * follows the cipher op.
 */
2366 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2367 struct rte_crypto_sym_xform *xform,
2368 dpaa_sec_session *session)
2371 struct rte_crypto_cipher_xform *cipher_xform;
2372 struct rte_crypto_auth_xform *auth_xform;
2374 session->ctxt = DPAA_SEC_CIPHER_HASH;
/* auth_cipher_text set => cipher xform comes first in the chain */
2375 if (session->auth_cipher_text) {
2376 cipher_xform = &xform->cipher;
2377 auth_xform = &xform->next->auth;
2379 cipher_xform = &xform->next->cipher;
2380 auth_xform = &xform->auth;
2383 /* Set IV parameters */
2384 session->iv.offset = cipher_xform->iv.offset;
2385 session->iv.length = cipher_xform->iv.length;
2387 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2388 RTE_CACHE_LINE_SIZE);
2389 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2390 DPAA_SEC_ERR("No Memory for cipher key");
2393 session->cipher_key.length = cipher_xform->key.length;
2394 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2395 RTE_CACHE_LINE_SIZE);
2396 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2397 DPAA_SEC_ERR("No Memory for auth key");
2400 session->auth_key.length = auth_xform->key.length;
2401 memcpy(session->cipher_key.data, cipher_xform->key.data,
2402 cipher_xform->key.length);
2403 memcpy(session->auth_key.data, auth_xform->key.data,
2404 auth_xform->key.length);
2406 session->digest_length = auth_xform->digest_length;
2407 session->auth_alg = auth_xform->algo;
2409 switch (auth_xform->algo) {
2410 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2411 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2412 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2414 case RTE_CRYPTO_AUTH_MD5_HMAC:
2415 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2416 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2418 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2419 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2420 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2422 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2423 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2424 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2426 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2427 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2428 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2430 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2431 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2432 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2434 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2435 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2436 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2438 case RTE_CRYPTO_AUTH_AES_CMAC:
2439 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2440 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2443 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2448 session->cipher_alg = cipher_xform->algo;
2450 switch (cipher_xform->algo) {
2451 case RTE_CRYPTO_CIPHER_AES_CBC:
2452 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2453 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2455 case RTE_CRYPTO_CIPHER_DES_CBC:
2456 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2457 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2459 case RTE_CRYPTO_CIPHER_3DES_CBC:
2460 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2461 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2463 case RTE_CRYPTO_CIPHER_AES_CTR:
2464 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2465 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2468 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2469 cipher_xform->algo);
2472 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/*
 * Initialize an AEAD session from the aead xform: copy the key,
 * record IV/AAD/digest geometry, and map the algo to SEC selectors.
 * Only AES-GCM is supported; direction follows the AEAD op.
 */
2478 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2479 struct rte_crypto_sym_xform *xform,
2480 dpaa_sec_session *session)
2482 session->aead_alg = xform->aead.algo;
2483 session->ctxt = DPAA_SEC_AEAD;
2484 session->iv.length = xform->aead.iv.length;
2485 session->iv.offset = xform->aead.iv.offset;
/* AAD length doubles as "auth only" length in the job builders */
2486 session->auth_only_len = xform->aead.aad_length;
2487 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2488 RTE_CACHE_LINE_SIZE);
2489 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2490 DPAA_SEC_ERR("No Memory for aead key\n");
2493 session->aead_key.length = xform->aead.key.length;
2494 session->digest_length = xform->aead.digest_length;
2496 memcpy(session->aead_key.data, xform->aead.key.data,
2497 xform->aead.key.length);
2499 switch (session->aead_alg) {
2500 case RTE_CRYPTO_AEAD_AES_GCM:
2501 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2502 session->aead_key.algmode = OP_ALG_AAI_GCM;
2505 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2509 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
/*
 * Claim a free SEC input (rx) frame queue from the device's pool by
 * scanning the inq_attach bitmap. Caller must hold the device lock.
 * Warns and returns when no queue is free.
 */
2515 static struct qman_fq *
2516 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2520 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2521 if (qi->inq_attach[i] == 0) {
2522 qi->inq_attach[i] = 1;
2526 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
/*
 * Return a SEC input frame queue to the device's pool: find it by
 * address in the inq array, retire it in QMan and clear its
 * attach flag so it can be reused by another session.
 */
2532 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2536 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2537 if (&qi->inq[i] == fq) {
2538 if (qman_retire_fq(fq, NULL) != 0)
2539 DPAA_SEC_DEBUG("Queue is not retired\n");
2541 qi->inq_attach[i] = 0;
/*
 * Bind a session to a queue pair for the current lcore: record the
 * qp, build the session's shared descriptor (CDB), affine a QMan
 * portal if needed, and initialize the session's rx queue to feed
 * completions into the qp's output queue.
 */
2549 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2553 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2554 ret = dpaa_sec_prep_cdb(sess);
2556 DPAA_SEC_ERR("Unable to prepare sec cdb");
2559 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2560 ret = rte_dpaa_portal_init((void *)0);
2562 DPAA_SEC_ERR("Failure in affining portal");
2566 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2567 rte_dpaa_mem_vtop(&sess->cdb),
2568 qman_fq_fqid(&qp->outq));
2570 DPAA_SEC_ERR("Unable to init sec queue");
/*
 * Free the session's key material (aead or auth+cipher keys) and
 * zero the whole session struct so no key bytes linger in memory.
 */
2576 free_session_data(dpaa_sec_session *s)
2579 rte_free(s->aead_key.data);
2581 rte_free(s->auth_key.data);
2582 rte_free(s->cipher_key.data);
2584 memset(s, 0, sizeof(dpaa_sec_session));
/*
 * Parse the xform chain and initialize the session accordingly:
 * cipher-only, auth-only, cipher+auth (either order, with NULL-algo
 * degenerate cases folded to the single-op initializers), or AEAD.
 * Only "cipher then auth" for encrypt and "auth then cipher" for
 * decrypt are accepted. On success, one SEC rx queue is attached per
 * lcore under the device lock; on failure key material is freed.
 */
2588 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2589 struct rte_crypto_sym_xform *xform, void *sess)
2591 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2592 dpaa_sec_session *session = sess;
2596 PMD_INIT_FUNC_TRACE();
2598 if (unlikely(sess == NULL)) {
2599 DPAA_SEC_ERR("invalid session struct");
2602 memset(session, 0, sizeof(dpaa_sec_session));
2604 /* Default IV length = 0 */
2605 session->iv.length = 0;
2608 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2609 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2610 ret = dpaa_sec_cipher_init(dev, xform, session);
2612 /* Authentication Only */
2613 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2614 xform->next == NULL) {
2615 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2616 session->ctxt = DPAA_SEC_AUTH;
2617 ret = dpaa_sec_auth_init(dev, xform, session);
2619 /* Cipher then Authenticate */
2620 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2621 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2622 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2623 session->auth_cipher_text = 1;
/* a NULL cipher or NULL auth degrades to the single-op case */
2624 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2625 ret = dpaa_sec_auth_init(dev, xform, session);
2626 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2627 ret = dpaa_sec_cipher_init(dev, xform, session);
2629 ret = dpaa_sec_chain_init(dev, xform, session);
2631 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2634 /* Authenticate then Cipher */
2635 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2636 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2637 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2638 session->auth_cipher_text = 0;
2639 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2640 ret = dpaa_sec_cipher_init(dev, xform, session);
2641 else if (xform->next->cipher.algo
2642 == RTE_CRYPTO_CIPHER_NULL)
2643 ret = dpaa_sec_auth_init(dev, xform, session);
2645 ret = dpaa_sec_chain_init(dev, xform, session);
2647 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2651 /* AEAD operation for AES-GCM kind of Algorithms */
2652 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2653 xform->next == NULL) {
2654 ret = dpaa_sec_aead_init(dev, xform, session);
2657 DPAA_SEC_ERR("Invalid crypto type");
2661 DPAA_SEC_ERR("unable to init session");
/* reserve one SEC rx queue per lcore for this session */
2665 rte_spinlock_lock(&internals->lock);
2666 for (i = 0; i < MAX_DPAA_CORES; i++) {
2667 session->inq[i] = dpaa_sec_attach_rxq(internals);
2668 if (session->inq[i] == NULL) {
2669 DPAA_SEC_ERR("unable to attach sec queue");
2670 rte_spinlock_unlock(&internals->lock);
2675 rte_spinlock_unlock(&internals->lock);
2680 free_session_data(session);
/*
 * Cryptodev session-configure op: take a private-data object from the
 * session mempool, fill it via dpaa_sec_set_session_parameters(), and
 * attach it to the generic session under this driver's id. The object
 * is returned to the mempool on parameter failure.
 */
2685 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2686 struct rte_crypto_sym_xform *xform,
2687 struct rte_cryptodev_sym_session *sess,
2688 struct rte_mempool *mempool)
2690 void *sess_private_data;
2693 PMD_INIT_FUNC_TRACE();
2695 if (rte_mempool_get(mempool, &sess_private_data)) {
2696 DPAA_SEC_ERR("Couldn't get object from session mempool");
2700 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2702 DPAA_SEC_ERR("failed to configure session parameters");
2704 /* Return session to mempool */
2705 rte_mempool_put(mempool, sess_private_data);
2709 set_sym_session_private_data(sess, dev->driver_id,
/*
 * Release everything a session holds: detach its per-lcore SEC rx
 * queues back to the device pool, free/zero the key material, and
 * return the private-data object to its originating mempool.
 */
2717 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2719 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2720 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2723 for (i = 0; i < MAX_DPAA_CORES; i++) {
2725 dpaa_sec_detach_rxq(qi, s->inq[i]);
2729 free_session_data(s);
2730 rte_mempool_put(sess_mp, (void *)s);
2733 /** Clear the memory of session so it doesn't leave key material behind */
/*
 * Cryptodev session-clear op: look up this driver's private data on
 * the generic session, free it via free_session_memory(), and unset
 * the pointer so the session cannot be reused stale.
 */
2735 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2736 struct rte_cryptodev_sym_session *sess)
2738 PMD_INIT_FUNC_TRACE();
2739 uint8_t index = dev->driver_id;
2740 void *sess_priv = get_sym_session_private_data(sess, index);
2741 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2744 free_session_memory(dev, s);
2745 set_sym_session_private_data(sess, index, NULL);
2749 #ifdef RTE_LIB_SECURITY
/*
 * Initialize the AEAD part of an IPsec (lookaside protocol) session:
 * copy the key, select the IPsec protocol-level GCM algorithm id by
 * digest length (GCM8/12/16), and store the 4-byte salt from the
 * IPsec xform into the encap or decap PDB per session direction.
 */
2751 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2752 struct rte_security_ipsec_xform *ipsec_xform,
2753 dpaa_sec_session *session)
2755 PMD_INIT_FUNC_TRACE();
2757 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2758 RTE_CACHE_LINE_SIZE);
2759 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2760 DPAA_SEC_ERR("No Memory for aead key");
2763 memcpy(session->aead_key.data, aead_xform->key.data,
2764 aead_xform->key.length);
2766 session->digest_length = aead_xform->digest_length;
2767 session->aead_key.length = aead_xform->key.length;
2769 switch (aead_xform->algo) {
2770 case RTE_CRYPTO_AEAD_AES_GCM:
/* IPsec proto descriptor encodes ICV size in the algorithm id */
2771 switch (session->digest_length) {
2773 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2776 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2779 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2782 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2783 session->digest_length);
2786 if (session->dir == DIR_ENC) {
2787 memcpy(session->encap_pdb.gcm.salt,
2788 (uint8_t *)&(ipsec_xform->salt), 4);
2790 memcpy(session->decap_pdb.gcm.salt,
2791 (uint8_t *)&(ipsec_xform->salt), 4);
2793 session->aead_key.algmode = OP_ALG_AAI_GCM;
2794 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2797 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
/*
 * dpaa_sec_ipsec_proto_init() - fill the SEC key/algorithm fields of a
 * lookaside-IPsec session from the cipher and auth crypto transforms.
 *
 * Copies the cipher and auth keys into cache-aligned zmalloc'd buffers,
 * then maps the DPDK algo enums onto CAAM OP_PCL_IPSEC_* protocol-descriptor
 * constants and OP_ALG_AAI_* modes. A NULL xform selects the corresponding
 * NULL algorithm. Returns non-zero on unsupported algorithms or allocation
 * failure (error-return lines are not visible in this extract).
 *
 * NOTE(review): this is an extract — blank lines, braces, "break;" and
 * "return" statements between the numbered lines below are not shown.
 */
2805 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2806 struct rte_crypto_auth_xform *auth_xform,
2807 struct rte_security_ipsec_xform *ipsec_xform,
2808 dpaa_sec_session *session)
/* Copy the cipher key; rte_zmalloc(NULL, 0, ...) may legitimately return
 * NULL, so a NULL result is only an error when key.length > 0. */
2811 session->cipher_key.data = rte_zmalloc(NULL,
2812 cipher_xform->key.length,
2813 RTE_CACHE_LINE_SIZE);
2814 if (session->cipher_key.data == NULL &&
2815 cipher_xform->key.length > 0) {
2816 DPAA_SEC_ERR("No Memory for cipher key");
2820 session->cipher_key.length = cipher_xform->key.length;
2821 memcpy(session->cipher_key.data, cipher_xform->key.data,
2822 cipher_xform->key.length);
2823 session->cipher_alg = cipher_xform->algo;
/* No cipher xform supplied: fall back to the NULL cipher. */
2825 session->cipher_key.data = NULL;
2826 session->cipher_key.length = 0;
2827 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
/* Same pattern for the authentication key. */
2831 session->auth_key.data = rte_zmalloc(NULL,
2832 auth_xform->key.length,
2833 RTE_CACHE_LINE_SIZE);
2834 if (session->auth_key.data == NULL &&
2835 auth_xform->key.length > 0) {
2836 DPAA_SEC_ERR("No Memory for auth key");
2839 session->auth_key.length = auth_xform->key.length;
2840 memcpy(session->auth_key.data, auth_xform->key.data,
2841 auth_xform->key.length);
2842 session->auth_alg = auth_xform->algo;
2843 session->digest_length = auth_xform->digest_length;
/* No auth xform supplied: fall back to NULL authentication. */
2845 session->auth_key.data = NULL;
2846 session->auth_key.length = 0;
2847 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
/* Map the DPDK auth algo to the CAAM IPsec-protocol auth constant. */
2850 switch (session->auth_alg) {
2851 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2852 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2853 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2855 case RTE_CRYPTO_AUTH_MD5_HMAC:
2856 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2857 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2859 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2860 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2861 session->auth_key.algmode = OP_ALG_AAI_HMAC;
/* The protocol descriptor assumes RFC 4868's 128-bit truncation;
 * warn when the caller asked for a different digest length. */
2862 if (session->digest_length != 16)
2864 "+++Using sha256-hmac truncated len is non-standard,"
2865 "it will not work with lookaside proto");
2867 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2868 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2869 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2871 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2872 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2873 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2875 case RTE_CRYPTO_AUTH_AES_CMAC:
2876 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2877 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2879 case RTE_CRYPTO_AUTH_NULL:
2880 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2882 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2883 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2884 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
/* Algorithms the SEC IPsec protocol descriptor does not support. */
2886 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2887 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2888 case RTE_CRYPTO_AUTH_SHA1:
2889 case RTE_CRYPTO_AUTH_SHA256:
2890 case RTE_CRYPTO_AUTH_SHA512:
2891 case RTE_CRYPTO_AUTH_SHA224:
2892 case RTE_CRYPTO_AUTH_SHA384:
2893 case RTE_CRYPTO_AUTH_MD5:
2894 case RTE_CRYPTO_AUTH_AES_GMAC:
2895 case RTE_CRYPTO_AUTH_KASUMI_F9:
2896 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2897 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2898 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2902 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
/* Map the DPDK cipher algo to the CAAM IPsec-protocol cipher constant. */
2907 switch (session->cipher_alg) {
2908 case RTE_CRYPTO_CIPHER_AES_CBC:
2909 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2910 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2912 case RTE_CRYPTO_CIPHER_DES_CBC:
2913 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2914 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2916 case RTE_CRYPTO_CIPHER_3DES_CBC:
2917 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2918 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2920 case RTE_CRYPTO_CIPHER_AES_CTR:
2921 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2922 session->cipher_key.algmode = OP_ALG_AAI_CTR;
/* AES-CTR needs the nonce/initial counter seeded into the encap or
 * decap PDB depending on the SA direction. */
2923 if (session->dir == DIR_ENC) {
2924 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2925 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2927 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2928 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2931 case RTE_CRYPTO_CIPHER_NULL:
2932 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2934 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2935 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2936 case RTE_CRYPTO_CIPHER_3DES_ECB:
2937 case RTE_CRYPTO_CIPHER_AES_ECB:
2938 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2939 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2940 session->cipher_alg);
2943 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2944 session->cipher_alg);
/*
 * dpaa_sec_set_ipsec_session() - configure an rte_security IPsec session.
 *
 * Parses the security session conf (cipher/auth or AEAD transform chain),
 * initializes the CAAM encap/decap PDB (tunnel header template, options,
 * SPI, anti-replay window) and attaches one SEC RX queue per core.
 * Returns 0 on success; error/cleanup paths are not fully visible in
 * this extract.
 */
2952 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2953 struct rte_security_session_conf *conf,
2956 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2957 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2958 struct rte_crypto_auth_xform *auth_xform = NULL;
2959 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2960 struct rte_crypto_aead_xform *aead_xform = NULL;
2961 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2965 PMD_INIT_FUNC_TRACE();
2967 memset(session, 0, sizeof(dpaa_sec_session));
2968 session->proto_alg = conf->protocol;
2969 session->ctxt = DPAA_SEC_IPSEC;
/* SA byte/packet lifetime limits are not supported by this PMD;
 * reject any non-zero limit. */
2971 if (ipsec_xform->life.bytes_hard_limit != 0 ||
2972 ipsec_xform->life.bytes_soft_limit != 0 ||
2973 ipsec_xform->life.packets_hard_limit != 0 ||
2974 ipsec_xform->life.packets_soft_limit != 0)
2977 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2978 session->dir = DIR_ENC;
2980 session->dir = DIR_DEC;
/* Split the xform chain into cipher/auth (either order) or AEAD and
 * hand it to the matching protocol-init helper. */
2982 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2983 cipher_xform = &conf->crypto_xform->cipher;
2984 if (conf->crypto_xform->next)
2985 auth_xform = &conf->crypto_xform->next->auth;
2986 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2987 ipsec_xform, session);
2988 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2989 auth_xform = &conf->crypto_xform->auth;
2990 if (conf->crypto_xform->next)
2991 cipher_xform = &conf->crypto_xform->next->cipher;
2992 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2993 ipsec_xform, session);
2994 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2995 aead_xform = &conf->crypto_xform->aead;
2996 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2997 ipsec_xform, session);
2999 DPAA_SEC_ERR("XFORM not specified");
3004 DPAA_SEC_ERR("Failed to process xform");
/* Egress SA: build the outer tunnel header template that the SEC
 * block prepends (PDBOPTS_ESP_OIHI_PDB_INL below). */
3008 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3009 if (ipsec_xform->tunnel.type ==
3010 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3011 session->ip4_hdr.ip_v = IPVERSION;
3012 session->ip4_hdr.ip_hl = 5;
3013 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
3014 sizeof(session->ip4_hdr));
3015 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3016 session->ip4_hdr.ip_id = 0;
3017 session->ip4_hdr.ip_off = 0;
3018 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3019 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
3020 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3021 IPPROTO_ESP : IPPROTO_AH;
/* Checksum is computed over the header with ip_sum zeroed first. */
3022 session->ip4_hdr.ip_sum = 0;
3023 session->ip4_hdr.ip_src =
3024 ipsec_xform->tunnel.ipv4.src_ip;
3025 session->ip4_hdr.ip_dst =
3026 ipsec_xform->tunnel.ipv4.dst_ip;
3027 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
3028 (void *)&session->ip4_hdr,
3030 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3031 } else if (ipsec_xform->tunnel.type ==
3032 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
/* Compose version/traffic-class/flow-label word in big endian. */
3033 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3034 DPAA_IPv6_DEFAULT_VTC_FLOW |
3035 ((ipsec_xform->tunnel.ipv6.dscp <<
3036 RTE_IPV6_HDR_TC_SHIFT) &
3037 RTE_IPV6_HDR_TC_MASK) |
3038 ((ipsec_xform->tunnel.ipv6.flabel <<
3039 RTE_IPV6_HDR_FL_SHIFT) &
3040 RTE_IPV6_HDR_FL_MASK));
3041 /* Payload length will be updated by HW */
3042 session->ip6_hdr.payload_len = 0;
3043 session->ip6_hdr.hop_limits =
3044 ipsec_xform->tunnel.ipv6.hlimit;
3045 session->ip6_hdr.proto = (ipsec_xform->proto ==
3046 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3047 IPPROTO_ESP : IPPROTO_AH;
3048 memcpy(&session->ip6_hdr.src_addr,
3049 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3050 memcpy(&session->ip6_hdr.dst_addr,
3051 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3052 session->encap_pdb.ip_hdr_len =
3053 sizeof(struct rte_ipv6_hdr);
/* Encap PDB options: next-header version, inline outer header,
 * plus optional TTL-decrement and extended sequence numbers. */
3056 session->encap_pdb.options =
3057 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3058 PDBOPTS_ESP_OIHI_PDB_INL |
3061 if (ipsec_xform->options.dec_ttl)
3062 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3063 if (ipsec_xform->options.esn)
3064 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3065 session->encap_pdb.spi = ipsec_xform->spi;
/* Ingress SA: decap PDB encodes the outer header length in the
 * upper 16 bits of 'options'. */
3067 } else if (ipsec_xform->direction ==
3068 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3069 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3070 session->decap_pdb.options = sizeof(struct ip) << 16;
3072 session->decap_pdb.options =
3073 sizeof(struct rte_ipv6_hdr) << 16;
3074 if (ipsec_xform->options.esn)
3075 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
/* Anti-replay: round the requested window up to a power of two and
 * select the matching ARS option (selection lines not all shown). */
3076 if (ipsec_xform->replay_win_sz) {
3078 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3087 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3090 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3093 session->decap_pdb.options |=
/* Attach one SEC RX queue per core under the device lock. */
3099 rte_spinlock_lock(&internals->lock);
3100 for (i = 0; i < MAX_DPAA_CORES; i++) {
3101 session->inq[i] = dpaa_sec_attach_rxq(internals);
3102 if (session->inq[i] == NULL) {
3103 DPAA_SEC_ERR("unable to attach sec queue");
3104 rte_spinlock_unlock(&internals->lock);
3108 rte_spinlock_unlock(&internals->lock);
/* Error path: release key buffers allocated for this session. */
3112 free_session_data(session);
/*
 * dpaa_sec_set_pdcp_session() - configure an rte_security PDCP session.
 *
 * Splits the crypto xform chain into cipher/auth parts, copies keys,
 * maps algos to PDCP_CIPHER_TYPE_* / PDCP_AUTH_TYPE_* constants, records
 * the PDCP parameters (domain, bearer, direction, SN size, HFN) and
 * attaches one SEC RX queue per core. On failure frees any keys already
 * copied and clears the session.
 */
3117 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3118 struct rte_security_session_conf *conf,
3121 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3122 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3123 struct rte_crypto_auth_xform *auth_xform = NULL;
3124 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3125 dpaa_sec_session *session = (dpaa_sec_session *)sess;
3126 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3130 PMD_INIT_FUNC_TRACE();
3132 memset(session, 0, sizeof(dpaa_sec_session));
3134 /* find xfrm types */
3135 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3136 cipher_xform = &xform->cipher;
3137 if (xform->next != NULL &&
3138 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3139 auth_xform = &xform->next->auth;
3140 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3141 auth_xform = &xform->auth;
3142 if (xform->next != NULL &&
3143 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3144 cipher_xform = &xform->next->cipher;
3146 DPAA_SEC_ERR("Invalid crypto type");
3150 session->proto_alg = conf->protocol;
3151 session->ctxt = DPAA_SEC_PDCP;
/* Map the cipher algo to the PDCP cipher-type constant; only the
 * 3GPP ciphers and NULL are valid here. */
3154 switch (cipher_xform->algo) {
3155 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3156 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3158 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3159 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3161 case RTE_CRYPTO_CIPHER_AES_CTR:
3162 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3164 case RTE_CRYPTO_CIPHER_NULL:
3165 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3168 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3169 session->cipher_alg);
/* Copy the cipher key (NULL result is an error only for a
 * non-zero key length). */
3173 session->cipher_key.data = rte_zmalloc(NULL,
3174 cipher_xform->key.length,
3175 RTE_CACHE_LINE_SIZE);
3176 if (session->cipher_key.data == NULL &&
3177 cipher_xform->key.length > 0) {
3178 DPAA_SEC_ERR("No Memory for cipher key");
3181 session->cipher_key.length = cipher_xform->key.length;
3182 memcpy(session->cipher_key.data, cipher_xform->key.data,
3183 cipher_xform->key.length);
3184 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3186 session->cipher_alg = cipher_xform->algo;
/* No cipher xform: NULL cipher, direction defaults to encrypt. */
3188 session->cipher_key.data = NULL;
3189 session->cipher_key.length = 0;
3190 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3191 session->dir = DIR_ENC;
/* Control-plane PDCP restricts the sequence-number size. */
3194 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3195 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3196 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3198 "PDCP Seq Num size should be 5/12 bits for cmode");
/* Map the auth algo to the PDCP auth-type constant; on an
 * unsupported algo the cipher key copied above is released. */
3205 switch (auth_xform->algo) {
3206 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3207 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3209 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3210 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3212 case RTE_CRYPTO_AUTH_AES_CMAC:
3213 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3215 case RTE_CRYPTO_AUTH_NULL:
3216 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3219 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
3221 rte_free(session->cipher_key.data);
3224 session->auth_key.data = rte_zmalloc(NULL,
3225 auth_xform->key.length,
3226 RTE_CACHE_LINE_SIZE);
3227 if (!session->auth_key.data &&
3228 auth_xform->key.length > 0) {
3229 DPAA_SEC_ERR("No Memory for auth key");
3230 rte_free(session->cipher_key.data);
3233 session->auth_key.length = auth_xform->key.length;
3234 memcpy(session->auth_key.data, auth_xform->key.data,
3235 auth_xform->key.length);
3236 session->auth_alg = auth_xform->algo;
3238 session->auth_key.data = NULL;
3239 session->auth_key.length = 0;
3240 session->auth_alg = 0;
/* Record the PDCP protocol parameters used when building the
 * shared descriptor. */
3242 session->pdcp.domain = pdcp_xform->domain;
3243 session->pdcp.bearer = pdcp_xform->bearer;
3244 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3245 session->pdcp.sn_size = pdcp_xform->sn_size;
3246 session->pdcp.hfn = pdcp_xform->hfn;
3247 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3248 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3249 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
/* HFN-override value is read from the op at the cipher IV offset. */
3251 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3253 rte_spinlock_lock(&dev_priv->lock);
3254 for (i = 0; i < MAX_DPAA_CORES; i++) {
3255 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3256 if (session->inq[i] == NULL) {
3257 DPAA_SEC_ERR("unable to attach sec queue");
3258 rte_spinlock_unlock(&dev_priv->lock);
3263 rte_spinlock_unlock(&dev_priv->lock);
/* Error path: free both keys and wipe the session (no key
 * material left behind). */
3266 rte_free(session->auth_key.data);
3267 rte_free(session->cipher_key.data);
3268 memset(session, 0, sizeof(dpaa_sec_session));
/*
 * dpaa_sec_security_session_create() - rte_security .session_create hook.
 *
 * Takes a session object from @mempool, dispatches to the IPsec or PDCP
 * session-setup helper according to conf->protocol (MACsec is listed but
 * unsupported), and on any setup failure returns the object to the
 * mempool. On success the private data is stored in the security session.
 */
3273 dpaa_sec_security_session_create(void *dev,
3274 struct rte_security_session_conf *conf,
3275 struct rte_security_session *sess,
3276 struct rte_mempool *mempool)
3278 void *sess_private_data;
3279 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3282 if (rte_mempool_get(mempool, &sess_private_data)) {
3283 DPAA_SEC_ERR("Couldn't get object from session mempool");
3287 switch (conf->protocol) {
3288 case RTE_SECURITY_PROTOCOL_IPSEC:
3289 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3292 case RTE_SECURITY_PROTOCOL_PDCP:
3293 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3296 case RTE_SECURITY_PROTOCOL_MACSEC:
3302 DPAA_SEC_ERR("failed to configure session parameters");
3303 /* Return session to mempool */
3304 rte_mempool_put(mempool, sess_private_data);
3308 set_sec_session_private_data(sess, sess_private_data);
3313 /** Clear the memory of session so it doesn't leave key material behind */
/*
 * rte_security .session_destroy hook: release the session's queues/keys
 * via free_session_memory() and detach the private data from @sess.
 * NOTE(review): lines between 3320 and 3323 are not visible here —
 * presumably a NULL check on sess_priv guards the two calls below; confirm
 * against the full source.
 */
3315 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3316 struct rte_security_session *sess)
3318 PMD_INIT_FUNC_TRACE();
3319 void *sess_priv = get_sec_session_private_data(sess);
3320 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3323 free_session_memory((struct rte_cryptodev *)dev, s);
3324 set_sec_session_private_data(sess, NULL);
3330 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3331 struct rte_cryptodev_config *config __rte_unused)
3333 PMD_INIT_FUNC_TRACE();
3339 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3341 PMD_INIT_FUNC_TRACE();
3346 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3348 PMD_INIT_FUNC_TRACE();
/*
 * Cryptodev .dev_close hook.
 * NOTE(review): the body between the trace and the function's return is
 * not visible in this extract; presumably it validates @dev before
 * returning — confirm against the full source.
 */
3352 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3354 PMD_INIT_FUNC_TRACE();
3363 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3364 struct rte_cryptodev_info *info)
3366 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3368 PMD_INIT_FUNC_TRACE();
3370 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3371 info->feature_flags = dev->feature_flags;
3372 info->capabilities = dpaa_sec_capabilities;
3373 info->sym.max_nb_sessions = internals->max_nb_sessions;
3374 info->driver_id = dpaa_cryptodev_driver_id;
/*
 * QMan DQRR callback for parallel-scheduled crypto-adapter events.
 *
 * Recovers the crypto op from the frame descriptor, translates the SEC
 * status into the op status, fills the rte_event from the out-FQ's stored
 * event template and hands the op back to the event device. The DQRR
 * entry is consumed immediately (parallel scheduling, no order hold).
 */
3378 static enum qman_cb_dqrr_result
3379 dpaa_sec_process_parallel_event(void *event,
3380 struct qman_portal *qm __always_unused,
3381 struct qman_fq *outq,
3382 const struct qm_dqrr_entry *dqrr,
3385 const struct qm_fd *fd;
3386 struct dpaa_sec_job *job;
3387 struct dpaa_sec_op_ctx *ctx;
3388 struct rte_event *ev = (struct rte_event *)event;
3392 /* sg is embedded in an op ctx,
3393 * sg[0] is for output
/* The FD address points at the job's SG table inside the op ctx;
 * container_of() recovers the enclosing context. */
3396 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3398 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3399 ctx->fd_status = fd->status;
/* For protocol-offload (security) sessions the hardware may change the
 * packet length; propagate sg[0]'s length to the mbuf. */
3400 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3401 struct qm_sg_entry *sg_out;
3404 sg_out = &job->sg[0];
3405 hw_sg_to_cpu(sg_out);
3406 len = sg_out->length;
3407 ctx->op->sym->m_src->pkt_len = len;
3408 ctx->op->sym->m_src->data_len = len;
3410 if (!ctx->fd_status) {
3411 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3413 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3414 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* Populate the event from the template saved at eventq attach time. */
3416 ev->event_ptr = (void *)ctx->op;
3418 ev->flow_id = outq->ev.flow_id;
3419 ev->sub_event_type = outq->ev.sub_event_type;
3420 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3421 ev->op = RTE_EVENT_OP_NEW;
3422 ev->sched_type = outq->ev.sched_type;
3423 ev->queue_id = outq->ev.queue_id;
3424 ev->priority = outq->ev.priority;
3425 *bufs = (void *)ctx->op;
/* Context is no longer needed once the op is handed to the app. */
3427 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3429 return qman_cb_dqrr_consume;
/*
 * QMan DQRR callback for atomic-scheduled crypto-adapter events.
 *
 * Same op/event recovery as the parallel variant, but the DQRR entry is
 * held active (qman_cb_dqrr_defer) and its index is recorded in the
 * per-lcore DQRR bookkeeping so the consumption can be acked later,
 * preserving atomic-flow ordering.
 */
3432 static enum qman_cb_dqrr_result
3433 dpaa_sec_process_atomic_event(void *event,
3434 struct qman_portal *qm __rte_unused,
3435 struct qman_fq *outq,
3436 const struct qm_dqrr_entry *dqrr,
3440 const struct qm_fd *fd;
3441 struct dpaa_sec_job *job;
3442 struct dpaa_sec_op_ctx *ctx;
3443 struct rte_event *ev = (struct rte_event *)event;
3447 /* sg is embedded in an op ctx,
3448 * sg[0] is for output
/* Recover the op context from the FD's job pointer. */
3451 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3453 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3454 ctx->fd_status = fd->status;
/* Security sessions: propagate the hardware-updated output length. */
3455 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3456 struct qm_sg_entry *sg_out;
3459 sg_out = &job->sg[0];
3460 hw_sg_to_cpu(sg_out);
3461 len = sg_out->length;
3462 ctx->op->sym->m_src->pkt_len = len;
3463 ctx->op->sym->m_src->data_len = len;
3465 if (!ctx->fd_status) {
3466 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3468 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3469 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3471 ev->event_ptr = (void *)ctx->op;
3472 ev->flow_id = outq->ev.flow_id;
3473 ev->sub_event_type = outq->ev.sub_event_type;
3474 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3475 ev->op = RTE_EVENT_OP_NEW;
3476 ev->sched_type = outq->ev.sched_type;
3477 ev->queue_id = outq->ev.queue_id;
3478 ev->priority = outq->ev.priority;
3480 /* Save active dqrr entries */
/* Derive the DQRR ring index from the entry address (ring entries are
 * 64 bytes apart; ring size is 16) and mark it held; impl_opaque and
 * the mbuf seqn carry index+1 so 0 can mean "none". */
3481 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3482 DPAA_PER_LCORE_DQRR_SIZE++;
3483 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3484 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3485 ev->impl_opaque = index + 1;
3486 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3487 *bufs = (void *)ctx->op;
3489 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
/* Defer consumption until the atomic context is released. */
3491 return qman_cb_dqrr_defer;
/*
 * dpaa_sec_eventq_attach() - bind a queue pair's output FQ to an event
 * device channel.
 *
 * Re-initializes the out FQ to deliver to @ch_id, installs the atomic or
 * parallel DQRR callback according to the event's sched type (ordered is
 * rejected), and caches the event template in the FQ for the callbacks.
 */
3495 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3498 const struct rte_event *event)
3500 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3501 struct qm_mcc_initfq opts = {0};
3505 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3506 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3507 opts.fqd.dest.channel = ch_id;
3509 switch (event->sched_type) {
3510 case RTE_SCHED_TYPE_ATOMIC:
3511 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3512 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3513 * configuration with HOLD_ACTIVE setting
3515 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3516 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3518 case RTE_SCHED_TYPE_ORDERED:
3519 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
/* Default: parallel scheduling — allow stashing to avoid blocking. */
3522 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3523 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3527 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3528 if (unlikely(ret)) {
3529 DPAA_SEC_ERR("unable to init caam source fq!");
/* Save the event template; the DQRR callbacks copy these fields into
 * every delivered event. */
3533 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
/*
 * dpaa_sec_eventq_detach() - unbind a queue pair's output FQ from the
 * event device and restore its default (poll-mode) callbacks.
 *
 * Retires and takes the FQ out-of-service before re-initializing it
 * without the event-delivery options.
 */
3539 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3542 struct qm_mcc_initfq opts = {0};
3544 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3546 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3547 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
/* Restore the regular dequeue/ERN handlers used in poll mode. */
3548 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3549 qp->outq.cb.ern = ern_sec_fq_handler;
3550 qman_retire_fq(&qp->outq, NULL);
3551 qman_oos_fq(&qp->outq);
3552 ret = qman_init_fq(&qp->outq, 0, &opts);
3554 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3555 qp->outq.cb.dqrr = NULL;
/* Cryptodev operations table registered by dpaa_sec_dev_init(). */
3560 static struct rte_cryptodev_ops crypto_ops = {
3561 .dev_configure = dpaa_sec_dev_configure,
3562 .dev_start = dpaa_sec_dev_start,
3563 .dev_stop = dpaa_sec_dev_stop,
3564 .dev_close = dpaa_sec_dev_close,
3565 .dev_infos_get = dpaa_sec_dev_infos_get,
3566 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3567 .queue_pair_release = dpaa_sec_queue_pair_release,
3568 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3569 .sym_session_configure = dpaa_sec_sym_session_configure,
3570 .sym_session_clear = dpaa_sec_sym_session_clear,
3571 /* Raw data-path API related operations */
3572 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3573 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3576 #ifdef RTE_LIB_SECURITY
3577 static const struct rte_security_capability *
3578 dpaa_sec_capabilities_get(void *device __rte_unused)
3580 return dpaa_sec_security_cap;
/* rte_security operations table; update/stats/metadata hooks are not
 * implemented by this PMD. */
3583 static const struct rte_security_ops dpaa_sec_security_ops = {
3584 .session_create = dpaa_sec_security_session_create,
3585 .session_update = NULL,
3586 .session_stats_get = NULL,
3587 .session_destroy = dpaa_sec_security_session_destroy,
3588 .set_pkt_metadata = NULL,
3589 .capabilities_get = dpaa_sec_capabilities_get
/*
 * dpaa_sec_uninit() - tear down a DPAA SEC cryptodev: free the security
 * context and the private data, then log the closure.
 * NOTE(review): lines 3594-3599 are not visible here — presumably a NULL
 * check on @dev precedes the dereference below; confirm against the full
 * source.
 */
3593 dpaa_sec_uninit(struct rte_cryptodev *dev)
3595 struct dpaa_sec_dev_private *internals;
3600 internals = dev->data->dev_private;
3601 rte_free(dev->security_ctx);
3603 rte_free(internals);
3605 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3606 dev->data->name, rte_socket_id());
3612 check_devargs_handler(__rte_unused const char *key, const char *value,
3613 __rte_unused void *opaque)
3615 dpaa_sec_dp_dump = atoi(value);
3616 if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3617 DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
3618 "supported, changing to FULL error prints\n");
3619 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
/*
 * dpaa_sec_get_devargs() - parse the device arguments and run
 * check_devargs_handler() for every occurrence of @key.
 * NOTE(review): the early-return NULL checks between lines 3628 and 3633
 * are not visible in this extract.
 */
3626 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3628 struct rte_kvargs *kvlist;
3633 kvlist = rte_kvargs_parse(devargs->args, NULL);
/* Nothing to do if the key was not supplied. */
3637 if (!rte_kvargs_count(kvlist, key)) {
3638 rte_kvargs_free(kvlist);
3642 rte_kvargs_process(kvlist, key,
3643 check_devargs_handler, NULL);
3644 rte_kvargs_free(kvlist);
/*
 * dpaa_sec_dev_init() - per-device PMD initialization.
 *
 * Installs the ops table, burst functions and feature flags; for the
 * primary process it also allocates the security context, initializes
 * the TX frame queues of every queue pair, creates the shared RX frame
 * queues, and parses the dump-mode devarg. Secondary processes return
 * early since the primary has done this work.
 * NOTE(review): error labels/cleanup flow between the numbered lines are
 * not visible in this extract.
 */
3648 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3650 struct dpaa_sec_dev_private *internals;
3651 #ifdef RTE_LIB_SECURITY
3652 struct rte_security_ctx *security_instance;
3654 struct dpaa_sec_qp *qp;
3658 PMD_INIT_FUNC_TRACE();
3660 cryptodev->driver_id = dpaa_cryptodev_driver_id;
3661 cryptodev->dev_ops = &crypto_ops;
3663 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3664 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3665 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3666 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3667 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3668 RTE_CRYPTODEV_FF_SECURITY |
3669 RTE_CRYPTODEV_FF_SYM_RAW_DP |
3670 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3671 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3672 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3673 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3674 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3676 internals = cryptodev->data->dev_private;
3677 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3678 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3681 * For secondary processes, we don't initialise any further as primary
3682 * has already done this work. Only check we don't need a different
3685 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3686 DPAA_SEC_WARN("Device already init by primary process");
3689 #ifdef RTE_LIB_SECURITY
3690 /* Initialize security_ctx only for primary process*/
3691 security_instance = rte_malloc("rte_security_instances_ops",
3692 sizeof(struct rte_security_ctx), 0);
3693 if (security_instance == NULL)
3695 security_instance->device = (void *)cryptodev;
3696 security_instance->ops = &dpaa_sec_security_ops;
3697 security_instance->sess_cnt = 0;
3698 cryptodev->security_ctx = security_instance;
/* Per-queue-pair TX FQ init, serialized by the device spinlock users. */
3700 rte_spinlock_init(&internals->lock);
3701 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3702 /* init qman fq for queue pair */
3703 qp = &internals->qps[i];
3704 ret = dpaa_sec_init_tx(&qp->outq);
3706 DPAA_SEC_ERR("config tx of queue pair %d", i);
/* Pre-create the pool of RX FQs that sessions later attach to. */
3711 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3712 QMAN_FQ_FLAG_TO_DCPORTAL;
3713 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3714 /* create rx qman fq for sessions*/
3715 ret = qman_create_fq(0, flags, &internals->inq[i]);
3716 if (unlikely(ret != 0)) {
3717 DPAA_SEC_ERR("sec qman_create_fq failed");
3722 dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3724 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
/* Error path: release the security context allocated above. */
3728 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3730 rte_free(cryptodev->security_ctx);
/*
 * cryptodev_dpaa_sec_probe() - bus probe callback for a DPAA SEC device.
 *
 * Allocates the cryptodev (private data only in the primary process),
 * discovers the SEC era from the device tree if not already configured,
 * ensures the calling thread has a QMan/BMan portal, then runs
 * dpaa_sec_dev_init(). On failure the allocated resources are released.
 * NOTE(review): error labels and some intermediate lines are not visible
 * in this extract.
 */
3735 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3736 struct rte_dpaa_device *dpaa_dev)
3738 struct rte_cryptodev *cryptodev;
3739 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3743 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3745 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3746 if (cryptodev == NULL)
/* Private data exists only in the primary; secondaries attach to it. */
3749 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3750 cryptodev->data->dev_private = rte_zmalloc_socket(
3751 "cryptodev private structure",
3752 sizeof(struct dpaa_sec_dev_private),
3753 RTE_CACHE_LINE_SIZE,
3756 if (cryptodev->data->dev_private == NULL)
3757 rte_panic("Cannot allocate memzone for private "
3761 dpaa_dev->crypto_dev = cryptodev;
3762 cryptodev->device = &dpaa_dev->device;
3764 /* init user callbacks */
3765 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3767 /* if sec device version is not configured */
/* Read the SEC era from the "fsl,sec-v4.0" device-tree node when the
 * RTA library has not been told it yet. */
3768 if (!rta_get_sec_era()) {
3769 const struct device_node *caam_node;
3771 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3772 const uint32_t *prop = of_get_property(caam_node,
3777 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
/* The probing thread needs a hardware portal for FQ operations. */
3783 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3784 retval = rte_dpaa_portal_init((void *)1);
3786 DPAA_SEC_ERR("Unable to initialize portal");
3791 /* Invoke PMD device initialization function */
3792 retval = dpaa_sec_dev_init(cryptodev);
3794 rte_cryptodev_pmd_probing_finish(cryptodev);
3800 /* In case of error, cleanup is done */
3801 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3802 rte_free(cryptodev->data->dev_private);
3804 rte_cryptodev_pmd_release_device(cryptodev);
/*
 * cryptodev_dpaa_sec_remove() - bus remove callback: uninitialize and
 * destroy the cryptodev associated with @dpaa_dev.
 * NOTE(review): the early-return checks between the numbered lines are
 * not visible in this extract.
 */
3810 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3812 struct rte_cryptodev *cryptodev;
3815 cryptodev = dpaa_dev->crypto_dev;
3816 if (cryptodev == NULL)
3819 ret = dpaa_sec_uninit(cryptodev);
3823 return rte_cryptodev_pmd_destroy(cryptodev);
/* DPAA bus driver descriptor for the SEC PMD.
 * NOTE(review): the nested ".driver = { ... }" initializer lines around
 * the .name field are not visible in this extract. */
3826 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3827 .drv_type = FSL_DPAA_CRYPTO,
3829 .name = "DPAA SEC PMD"
3831 .probe = cryptodev_dpaa_sec_probe,
3832 .remove = cryptodev_dpaa_sec_remove,
/* Driver registration: register with the DPAA bus, register the crypto
 * driver (assigning dpaa_cryptodev_driver_id), declare the supported
 * devargs string, and create the PMD's log type. */
3835 static struct cryptodev_driver dpaa_sec_crypto_drv;
3837 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3838 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3839 dpaa_cryptodev_driver_id);
3840 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3841 DRIVER_DUMP_MODE "=<int>");
3842 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);