1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2022 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
21 #include <rte_cycles.h>
25 #include <rte_kvargs.h>
26 #include <rte_malloc.h>
28 #include <rte_memcpy.h>
29 #include <rte_string_fns.h>
30 #include <rte_spinlock.h>
31 #include <rte_hexdump.h>
37 /* RTA header files */
38 #include <desc/common.h>
39 #include <desc/algo.h>
40 #include <desc/ipsec.h>
41 #include <desc/pdcp.h>
42 #include <desc/sdap.h>
44 #include <rte_dpaa_bus.h>
46 #include <dpaa_sec_event.h>
47 #include <dpaa_sec_log.h>
48 #include <dpaax_iova_table.h>
/* devarg key used to select the datapath dump verbosity at probe time */
50 #define DRIVER_DUMP_MODE "drv_dump_mode"
52 /* DPAA_SEC_DP_DUMP levels */
/* NOTE(review): enumerator list is not visible in this chunk; at least
 * DPAA_SEC_DP_NO_DUMP and DPAA_SEC_DP_ERR_DUMP exist (used below).
 */
53 enum dpaa_sec_dump_levels {
/* current dump level; default dumps only on per-op SEC errors */
59 uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
/* driver id assigned by the cryptodev framework at registration */
61 uint8_t dpaa_cryptodev_driver_id;
/*
 * Map the hardware frame-descriptor status carried in @ctx onto the
 * crypto op status: 0 -> SUCCESS, otherwise log the SEC error code and
 * mark the op as ERROR.
 * NOTE(review): return-type line and braces are outside this view.
 */
64 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
66 if (!ctx->fd_status) {
67 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
69 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
70 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/*
 * Allocate a per-op context from the lcore-affine qp context mempool and
 * prepare its scatter-gather area for @sg_count entries.
 * Returns the context, or an error path (not fully visible) when the
 * mempool get fails.
 */
74 static inline struct dpaa_sec_op_ctx *
75 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
77 struct dpaa_sec_op_ctx *ctx;
/* one ctx pool per qp; qp selected by the calling lcore id */
80 retval = rte_mempool_get(
81 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
84 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
88 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
89 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
90 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
91 * each packet, memset is costlier than dcbz_64().
/* dcbz_64 zeroes a 64-byte cache line, i.e. 4 SG entries per call */
93 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
94 dcbz_64(&ctx->job.sg[i]);
/* remember owning pool (for free on completion) and virt->iova delta */
96 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
97 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
/*
 * Enqueue-rejection (ERN) callback for SEC frame queues: logs the FQ id,
 * rejection code and sequence number from the message-ring entry.
 */
103 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
105 const struct qm_mr_entry *msg)
107 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
108 fq->fqid, msg->ern.rc, msg->ern.seqnum);
111 /* initialize the queue with dest chan as caam chan so that
112 * all the packets in this queue could be dispatched into caam
*
* Program @fq_in so frames land on the CAAM channel: context_a carries
* the shared-descriptor address (@hwdesc), context_b names the result
* FQ (@fqid_out). Schedules the FQ and installs the ERN handler.
*/
115 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
118 struct qm_mcc_initfq fq_opts;
122 /* Clear FQ options */
123 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
125 flags = QMAN_INITFQ_FLAG_SCHED;
126 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
127 QM_INITFQ_WE_CONTEXTB;
/* context_a = shared descriptor, context_b = output FQID */
129 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
130 fq_opts.fqd.context_b = fqid_out;
131 fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
132 fq_opts.fqd.dest.wq = 0;
134 fq_in->cb.ern = ern_sec_fq_handler;
136 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
138 ret = qman_init_fq(fq_in, flags, &fq_opts);
139 if (unlikely(ret != 0))
140 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
145 /* something is put into in_fq and caam put the crypto result into out_fq */
/*
 * DQRR callback on the output FQ: recovers the op context embedded
 * around the SG table, records the FD status, fixes up the last-segment
 * length for security-session ops, and stages the op in the per-lcore
 * burst array. Defers once DPAA_SEC_BURST ops are pending.
 */
146 static enum qman_cb_dqrr_result
147 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
148 struct qman_fq *fq __always_unused,
149 const struct qm_dqrr_entry *dqrr)
151 const struct qm_fd *fd;
152 struct dpaa_sec_job *job;
153 struct dpaa_sec_op_ctx *ctx;
/* back-pressure: let qman redeliver once the burst array is full */
155 if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
156 return qman_cb_dqrr_defer;
158 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
159 return qman_cb_dqrr_consume;
162 /* sg is embedded in an op ctx,
163 * sg[0] is for output
/* FD address points at the job's SG table; ctx wraps the job */
166 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
168 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
169 ctx->fd_status = fd->status;
170 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
171 struct qm_sg_entry *sg_out;
173 struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
174 ctx->op->sym->m_src : ctx->op->sym->m_dst;
/* walk the chain; remaining length belongs to the last segment */
176 sg_out = &job->sg[0];
177 hw_sg_to_cpu(sg_out);
178 len = sg_out->length;
180 while (mbuf->next != NULL) {
181 len -= mbuf->data_len;
184 mbuf->data_len = len;
186 DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
187 dpaa_sec_op_ending(ctx);
189 return qman_cb_dqrr_consume;
192 /* caam result is put into this queue */
/*
 * Create and initialize the result (output) FQ: dynamic FQID, no direct
 * enqueue, locked to this portal; dequeue callback delivers completed
 * crypto ops, ERN callback logs rejections.
 */
194 dpaa_sec_init_tx(struct qman_fq *fq)
197 struct qm_mcc_initfq opts;
200 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
201 QMAN_FQ_FLAG_DYNAMIC_FQID;
203 ret = qman_create_fq(0, flags, fq);
205 DPAA_SEC_ERR("qman_create_fq failed");
209 memset(&opts, 0, sizeof(opts));
210 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
211 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
213 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
215 fq->cb.dqrr = dqrr_out_fq_cb_rx;
216 fq->cb.ern = ern_sec_fq_handler;
218 ret = qman_init_fq(fq, 0, &opts);
220 DPAA_SEC_ERR("unable to init caam source fq!");
/* True when the session is pure AEAD (no separate cipher/auth algs set) */
227 static inline int is_aead(dpaa_sec_session *ses)
229 return ((ses->cipher_alg == 0) &&
230 (ses->auth_alg == 0) &&
231 (ses->aead_alg != 0));
/* True when the session direction is encrypt/generate */
234 static inline int is_encode(dpaa_sec_session *ses)
236 return ses->dir == DIR_ENC;
/* True when the session direction is decrypt/verify */
239 static inline int is_decode(dpaa_sec_session *ses)
241 return ses->dir == DIR_DEC;
244 #ifdef RTE_LIB_SECURITY
/*
 * Build the PDCP shared descriptor for @ses into ses->cdb using the RTA
 * cnstr_shdsc_pdcp_* constructors. Keys start as immediate (RTA_DATA_IMM)
 * and are demoted to by-pointer (RTA_DATA_PTR) when the inline-key query
 * says they do not fit in the descriptor. Returns the descriptor length
 * in words, or a negative RTA error.
 */
246 dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
248 struct alginfo authdata = {0}, cipherdata = {0};
249 struct sec_cdb *cdb = &ses->cdb;
250 struct alginfo *p_authdata = NULL;
251 int32_t shared_desc_len = 0;
/* swap flag (set on big-endian hosts; assignment not visible here) */
252 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
258 cipherdata.key = (size_t)ses->cipher_key.data;
259 cipherdata.keylen = ses->cipher_key.length;
260 cipherdata.key_enc_flags = 0;
261 cipherdata.key_type = RTA_DATA_IMM;
262 cipherdata.algtype = ses->cipher_key.alg;
263 cipherdata.algmode = ses->cipher_key.algmode;
266 authdata.key = (size_t)ses->auth_key.data;
267 authdata.keylen = ses->auth_key.length;
268 authdata.key_enc_flags = 0;
269 authdata.key_type = RTA_DATA_IMM;
270 authdata.algtype = ses->auth_key.alg;
271 authdata.algmode = ses->auth_key.algmode;
273 p_authdata = &authdata;
/* SDAP and plain PDCP use different inline-fit queries */
276 if (ses->pdcp.sdap_enabled) {
277 int nb_keys_to_inline =
278 rta_inline_pdcp_sdap_query(authdata.algtype,
/* >=1 keys must move out of line: cipher key first, then auth key */
282 if (nb_keys_to_inline >= 1) {
283 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
284 (size_t)cipherdata.key);
285 cipherdata.key_type = RTA_DATA_PTR;
287 if (nb_keys_to_inline >= 2) {
288 authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
289 (size_t)authdata.key);
290 authdata.key_type = RTA_DATA_PTR;
293 if (rta_inline_pdcp_query(authdata.algtype,
296 ses->pdcp.hfn_ovd)) {
297 cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
298 (size_t)cipherdata.key);
299 cipherdata.key_type = RTA_DATA_PTR;
/* control plane: distinct encap/decap constructors */
303 if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
304 if (ses->dir == DIR_ENC)
305 shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
306 cdb->sh_desc, 1, swap,
311 ses->pdcp.hfn_threshold,
312 &cipherdata, &authdata);
313 else if (ses->dir == DIR_DEC)
314 shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
315 cdb->sh_desc, 1, swap,
320 ses->pdcp.hfn_threshold,
321 &cipherdata, &authdata);
322 } else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
323 shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
/* user plane: SDAP-aware variants when sdap_enabled */
326 if (ses->dir == DIR_ENC) {
327 if (ses->pdcp.sdap_enabled)
329 cnstr_shdsc_pdcp_sdap_u_plane_encap(
330 cdb->sh_desc, 1, swap,
335 ses->pdcp.hfn_threshold,
336 &cipherdata, p_authdata);
339 cnstr_shdsc_pdcp_u_plane_encap(
340 cdb->sh_desc, 1, swap,
345 ses->pdcp.hfn_threshold,
346 &cipherdata, p_authdata);
347 } else if (ses->dir == DIR_DEC) {
348 if (ses->pdcp.sdap_enabled)
350 cnstr_shdsc_pdcp_sdap_u_plane_decap(
351 cdb->sh_desc, 1, swap,
356 ses->pdcp.hfn_threshold,
357 &cipherdata, p_authdata);
360 cnstr_shdsc_pdcp_u_plane_decap(
361 cdb->sh_desc, 1, swap,
366 ses->pdcp.hfn_threshold,
367 &cipherdata, p_authdata);
370 return shared_desc_len;
373 /* prepare ipsec proto command block of the session */
/*
 * Build the IPsec-protocol shared descriptor for @ses into ses->cdb.
 * rta_inline_query decides (via bits in sh_desc[2]) whether cipher/auth
 * keys can stay immediate in the descriptor or must be referenced by
 * physical address. Returns descriptor length or a negative error.
 */
375 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
377 struct alginfo cipherdata = {0}, authdata = {0};
378 struct sec_cdb *cdb = &ses->cdb;
379 int32_t shared_desc_len = 0;
381 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
387 cipherdata.key = (size_t)ses->cipher_key.data;
388 cipherdata.keylen = ses->cipher_key.length;
389 cipherdata.key_enc_flags = 0;
390 cipherdata.key_type = RTA_DATA_IMM;
391 cipherdata.algtype = ses->cipher_key.alg;
392 cipherdata.algmode = ses->cipher_key.algmode;
/* auth is optional for combined-mode (e.g. AES-GCM) IPsec sessions */
394 if (ses->auth_key.length) {
395 authdata.key = (size_t)ses->auth_key.data;
396 authdata.keylen = ses->auth_key.length;
397 authdata.key_enc_flags = 0;
398 authdata.key_type = RTA_DATA_IMM;
399 authdata.algtype = ses->auth_key.alg;
400 authdata.algmode = ses->auth_key.algmode;
/* sh_desc[0..1] pass key lengths into the inline-fit query */
403 cdb->sh_desc[0] = cipherdata.keylen;
404 cdb->sh_desc[1] = authdata.keylen;
405 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
407 (unsigned int *)cdb->sh_desc,
408 &cdb->sh_desc[2], 2);
411 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
/* bit0 of sh_desc[2]: cipher key inline; bit1: auth key inline */
414 if (cdb->sh_desc[2] & 1)
415 cipherdata.key_type = RTA_DATA_IMM;
417 cipherdata.key = (size_t)rte_dpaa_mem_vtop(
418 (void *)(size_t)cipherdata.key);
419 cipherdata.key_type = RTA_DATA_PTR;
421 if (cdb->sh_desc[2] & (1<<1))
422 authdata.key_type = RTA_DATA_IMM;
424 authdata.key = (size_t)rte_dpaa_mem_vtop(
425 (void *)(size_t)authdata.key);
426 authdata.key_type = RTA_DATA_PTR;
432 if (ses->dir == DIR_ENC) {
433 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
435 true, swap, SHR_SERIAL,
437 (uint8_t *)&ses->ip4_hdr,
438 &cipherdata, &authdata);
439 } else if (ses->dir == DIR_DEC) {
440 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
442 true, swap, SHR_SERIAL,
444 &cipherdata, &authdata);
446 return shared_desc_len;
449 /* prepare command block of the session */
/*
 * Top-level shared-descriptor builder: dispatches on the session context
 * type (IPsec/PDCP protocol offload, cipher-only, auth-only, AEAD,
 * cipher+hash) to the matching RTA constructor, then byte-swaps the CDB
 * header words to big-endian for the SEC block. Returns a negative value
 * from the constructor on failure.
 */
451 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
453 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
454 int32_t shared_desc_len = 0;
455 struct sec_cdb *cdb = &ses->cdb;
457 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
463 memset(cdb, 0, sizeof(struct sec_cdb));
466 #ifdef RTE_LIB_SECURITY
468 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
471 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
474 case DPAA_SEC_CIPHER:
475 alginfo_c.key = (size_t)ses->cipher_key.data;
476 alginfo_c.keylen = ses->cipher_key.length;
477 alginfo_c.key_enc_flags = 0;
478 alginfo_c.key_type = RTA_DATA_IMM;
479 alginfo_c.algtype = ses->cipher_key.alg;
480 alginfo_c.algmode = ses->cipher_key.algmode;
482 switch (ses->cipher_alg) {
483 case RTE_CRYPTO_CIPHER_AES_CBC:
484 case RTE_CRYPTO_CIPHER_3DES_CBC:
485 case RTE_CRYPTO_CIPHER_DES_CBC:
486 case RTE_CRYPTO_CIPHER_AES_CTR:
487 case RTE_CRYPTO_CIPHER_3DES_CTR:
488 shared_desc_len = cnstr_shdsc_blkcipher(
490 swap, SHR_NEVER, &alginfo_c,
494 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
495 shared_desc_len = cnstr_shdsc_snow_f8(
496 cdb->sh_desc, true, swap,
500 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
501 shared_desc_len = cnstr_shdsc_zuce(
502 cdb->sh_desc, true, swap,
507 DPAA_SEC_ERR("unsupported cipher alg %d",
/* auth-only context: plain hash, HMAC, SNOW f9, ZUC-A, AES-MAC */
513 alginfo_a.key = (size_t)ses->auth_key.data;
514 alginfo_a.keylen = ses->auth_key.length;
515 alginfo_a.key_enc_flags = 0;
516 alginfo_a.key_type = RTA_DATA_IMM;
517 alginfo_a.algtype = ses->auth_key.alg;
518 alginfo_a.algmode = ses->auth_key.algmode;
519 switch (ses->auth_alg) {
520 case RTE_CRYPTO_AUTH_MD5:
521 case RTE_CRYPTO_AUTH_SHA1:
522 case RTE_CRYPTO_AUTH_SHA224:
523 case RTE_CRYPTO_AUTH_SHA256:
524 case RTE_CRYPTO_AUTH_SHA384:
525 case RTE_CRYPTO_AUTH_SHA512:
526 shared_desc_len = cnstr_shdsc_hash(
528 swap, SHR_NEVER, &alginfo_a,
532 case RTE_CRYPTO_AUTH_MD5_HMAC:
533 case RTE_CRYPTO_AUTH_SHA1_HMAC:
534 case RTE_CRYPTO_AUTH_SHA224_HMAC:
535 case RTE_CRYPTO_AUTH_SHA256_HMAC:
536 case RTE_CRYPTO_AUTH_SHA384_HMAC:
537 case RTE_CRYPTO_AUTH_SHA512_HMAC:
538 shared_desc_len = cnstr_shdsc_hmac(
540 swap, SHR_NEVER, &alginfo_a,
544 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
545 shared_desc_len = cnstr_shdsc_snow_f9(
546 cdb->sh_desc, true, swap,
551 case RTE_CRYPTO_AUTH_ZUC_EIA3:
552 shared_desc_len = cnstr_shdsc_zuca(
553 cdb->sh_desc, true, swap,
558 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
559 case RTE_CRYPTO_AUTH_AES_CMAC:
560 shared_desc_len = cnstr_shdsc_aes_mac(
562 true, swap, SHR_NEVER,
568 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
/* AEAD context: only GCM is constructed below */
572 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
573 DPAA_SEC_ERR("not supported aead alg");
576 alginfo.key = (size_t)ses->aead_key.data;
577 alginfo.keylen = ses->aead_key.length;
578 alginfo.key_enc_flags = 0;
579 alginfo.key_type = RTA_DATA_IMM;
580 alginfo.algtype = ses->aead_key.alg;
581 alginfo.algmode = ses->aead_key.algmode;
583 if (ses->dir == DIR_ENC)
584 shared_desc_len = cnstr_shdsc_gcm_encap(
585 cdb->sh_desc, true, swap, SHR_NEVER,
590 shared_desc_len = cnstr_shdsc_gcm_decap(
591 cdb->sh_desc, true, swap, SHR_NEVER,
596 case DPAA_SEC_CIPHER_HASH:
597 alginfo_c.key = (size_t)ses->cipher_key.data;
598 alginfo_c.keylen = ses->cipher_key.length;
599 alginfo_c.key_enc_flags = 0;
600 alginfo_c.key_type = RTA_DATA_IMM;
601 alginfo_c.algtype = ses->cipher_key.alg;
602 alginfo_c.algmode = ses->cipher_key.algmode;
604 alginfo_a.key = (size_t)ses->auth_key.data;
605 alginfo_a.keylen = ses->auth_key.length;
606 alginfo_a.key_enc_flags = 0;
607 alginfo_a.key_type = RTA_DATA_IMM;
608 alginfo_a.algtype = ses->auth_key.alg;
609 alginfo_a.algmode = ses->auth_key.algmode;
/* same inline-key-fit dance as the IPsec path above */
611 cdb->sh_desc[0] = alginfo_c.keylen;
612 cdb->sh_desc[1] = alginfo_a.keylen;
613 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
615 (unsigned int *)cdb->sh_desc,
616 &cdb->sh_desc[2], 2);
619 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
622 if (cdb->sh_desc[2] & 1)
623 alginfo_c.key_type = RTA_DATA_IMM;
625 alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
626 (void *)(size_t)alginfo_c.key);
627 alginfo_c.key_type = RTA_DATA_PTR;
629 if (cdb->sh_desc[2] & (1<<1))
630 alginfo_a.key_type = RTA_DATA_IMM;
632 alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
633 (void *)(size_t)alginfo_a.key);
634 alginfo_a.key_type = RTA_DATA_PTR;
639 /* Auth_only_len is set as 0 here and it will be
640 * overwritten in fd for each packet.
642 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
643 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
645 ses->digest_length, ses->dir);
647 case DPAA_SEC_HASH_CIPHER:
649 DPAA_SEC_ERR("error: Unsupported session");
653 if (shared_desc_len < 0) {
654 DPAA_SEC_ERR("error in preparing command block");
655 return shared_desc_len;
/* store descriptor length and convert header words to SEC endianness */
658 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
659 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
660 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
/*
 * Diagnostic dump of a failed crypto op: session parameters and keys,
 * shared descriptor words, source/destination mbufs, the job's in/out SG
 * entries, mempool occupancy and queue-pair counters.
 *
 * Fix: the conditional blocks below used the legacy make-system macro
 * RTE_LIBRTE_SECURITY, while the rest of this file (and the meson build)
 * use RTE_LIB_SECURITY — so the security-session lookup and the PDCP
 * parameter dump were compiled out unconditionally. Renamed the three
 * guards to RTE_LIB_SECURITY; no other token changed.
 */
666 dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
668 struct dpaa_sec_job *job = &ctx->job;
669 struct rte_crypto_op *op = ctx->op;
670 dpaa_sec_session *sess = NULL;
671 struct sec_cdb c_cdb, *cdb;
673 struct rte_crypto_sym_op *sym_op;
674 struct qm_sg_entry sg[2];
676 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
677 sess = (dpaa_sec_session *)
678 get_sym_session_private_data(
680 dpaa_cryptodev_driver_id);
681 #ifdef RTE_LIB_SECURITY
682 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
683 sess = (dpaa_sec_session *)
684 get_sec_session_private_data(
685 op->sym->sec_session);
688 printf("session is NULL\n");
693 rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
694 #ifdef RTE_LIB_SECURITY
695 printf("\nsession protocol type = %d\n", sess->proto_alg);
697 printf("\n****************************************\n"
698 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
699 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
700 "\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
701 "\tCipher algmode:\t%d\n", sess->ctxt,
702 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
703 sess->cipher_alg, sess->auth_alg, sess->aead_alg,
704 (uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
705 sess->cipher_key.algmode);
706 rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
707 sess->cipher_key.length);
708 rte_hexdump(stdout, "auth key", sess->auth_key.data,
709 sess->auth_key.length);
710 printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
711 "\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
712 "\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
713 "\taead cipher text:\t%d\n",
714 (uint64_t)sess->auth_key.length, sess->auth_key.alg,
715 sess->auth_key.algmode,
716 sess->iv.length, sess->iv.offset,
717 sess->digest_length, sess->auth_only_len,
718 sess->auth_cipher_text);
719 #ifdef RTE_LIB_SECURITY
720 printf("PDCP session params:\n"
721 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
722 "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
723 "\t%d\n\thfn:\t\t%d\n"
724 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
725 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
726 sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
727 sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
728 sess->pdcp.hfn_threshold);
/* descriptor header words are stored big-endian; undo for display */
730 c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
731 c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
732 bufsize = c_cdb.sh_hdr.hi.field.idlen;
734 printf("cdb = %p\n\n", cdb);
735 printf("Descriptor size = %d\n", bufsize);
737 for (m = 0; m < bufsize; m++)
738 printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
744 printf("Source mbuf:\n");
745 rte_pktmbuf_dump(stdout, sym_op->m_src,
746 sym_op->m_src->data_len);
749 printf("Destination mbuf:\n");
750 rte_pktmbuf_dump(stdout, sym_op->m_dst,
751 sym_op->m_dst->data_len);
754 printf("Session address = %p\ncipher offset: %d, length: %d\n"
755 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n",
756 sym_op->session, sym_op->cipher.data.offset,
757 sym_op->cipher.data.length,
758 sym_op->auth.data.offset, sym_op->auth.data.length,
759 sym_op->aead.data.offset, sym_op->aead.data.length);
762 printf("******************************************************\n");
763 printf("ctx info:\n");
764 printf("job->sg[0] output info:\n");
765 memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
766 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
767 "\n\tbpid = %d\n\toffset = %d\n",
768 (uint64_t)sg[0].addr, sg[0].length, sg[0].final,
769 sg[0].extension, sg[0].bpid, sg[0].offset);
770 printf("\njob->sg[1] input info:\n");
771 memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
772 hw_sg_to_cpu(&sg[1]);
773 printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
774 "\n\tbpid = %d\n\toffset = %d\n",
775 (uint64_t)sg[1].addr, sg[1].length, sg[1].final,
776 sg[1].extension, sg[1].bpid, sg[1].offset);
778 printf("\nctx pool addr = %p\n", ctx->ctx_pool);
780 printf("ctx pool available counts = %d\n",
781 rte_mempool_avail_count(ctx->ctx_pool));
783 printf("\nop pool addr = %p\n", op->mempool);
785 printf("op pool available counts = %d\n",
786 rte_mempool_avail_count(op->mempool));
788 printf("********************************************************\n");
789 printf("Queue data:\n");
790 printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
791 "\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
792 "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
793 qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
794 qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
795 qp->rx_errs, qp->tx_errs);
798 /* qp is lockless, should be accessed by only one thread */
/*
 * Volatile-dequeue up to @nb_ops completed ops from the qp's output FQ.
 * For each valid FD: recover the op ctx, fix the last-segment length on
 * security sessions, set the op status (dumping diagnostics per the
 * dpaa_sec_dp_dump level on errors), then return the ctx to its pool.
 */
800 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
803 unsigned int pkts = 0;
804 int num_rx_bufs, ret;
805 struct qm_dqrr_entry *dq;
806 uint32_t vdqcr_flags = 0;
810 * Until request for four buffers, we provide exact number of buffers.
811 * Otherwise we do not set the QM_VDQCR_EXACT flag.
812 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
813 * requested, so we request two less in this case.
816 vdqcr_flags = QM_VDQCR_EXACT;
817 num_rx_bufs = nb_ops;
819 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
820 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
822 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
827 const struct qm_fd *fd;
828 struct dpaa_sec_job *job;
829 struct dpaa_sec_op_ctx *ctx;
830 struct rte_crypto_op *op;
832 dq = qman_dequeue(fq);
837 /* sg is embedded in an op ctx,
838 * sg[0] is for output
841 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
843 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
844 ctx->fd_status = fd->status;
846 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
847 struct qm_sg_entry *sg_out;
849 struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
850 op->sym->m_src : op->sym->m_dst;
852 sg_out = &job->sg[0];
853 hw_sg_to_cpu(sg_out);
854 len = sg_out->length;
/* residual length after the walk belongs to the last segment */
856 while (mbuf->next != NULL) {
857 len -= mbuf->data_len;
860 mbuf->data_len = len;
862 if (!ctx->fd_status) {
863 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
865 if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
866 DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
/* full descriptor/mbuf dump only at the most verbose level */
868 if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
869 dpaa_sec_dump(ctx, qp);
871 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
875 /* report op status to sym->op and then free the ctx memory */
876 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
878 qman_dqrr_consume(fq, dq);
879 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
/*
 * Build the SEC job (compound frame) for an auth-only op on a
 * scatter-gather mbuf chain. Output SG = digest buffer; input SG chain =
 * optional IV (SNOW f9 / ZUC need a converted IV), the mbuf data run,
 * and — for verification (decode) — a copy of the expected digest.
 * Returns the job or NULL-ish error paths (not all visible here).
 */
885 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
887 struct rte_crypto_sym_op *sym = op->sym;
888 struct rte_mbuf *mbuf = sym->m_src;
889 struct dpaa_sec_job *cf;
890 struct dpaa_sec_op_ctx *ctx;
891 struct qm_sg_entry *sg, *out_sg, *in_sg;
892 phys_addr_t start_addr;
893 uint8_t *old_digest, extra_segs;
894 int data_len, data_offset;
896 data_len = sym->auth.data.length;
897 data_offset = sym->auth.data.offset;
/* SNOW/ZUC auth lengths are given in bits; require byte alignment */
899 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
900 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
901 if ((data_len & 7) || (data_offset & 7)) {
902 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
906 data_len = data_len >> 3;
907 data_offset = data_offset >> 3;
915 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
916 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
920 ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
926 old_digest = ctx->digest;
/* output entry: where SEC writes (encode) / checks against (decode) */
930 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
931 out_sg->length = ses->digest_length;
932 cpu_to_hw_sg(out_sg);
936 /* need to extend the input to a compound frame */
937 in_sg->extension = 1;
939 in_sg->length = data_len;
940 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
945 if (ses->iv.length) {
948 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* SNOW f9 / ZUC EIA carry IV material in converted layouts */
951 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
952 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
954 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
955 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
958 sg->length = ses->iv.length;
960 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
961 in_sg->length += sg->length;
966 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
967 sg->offset = data_offset;
969 if (data_len <= (mbuf->data_len - data_offset)) {
970 sg->length = data_len;
972 sg->length = mbuf->data_len - data_offset;
974 /* remaining i/p segs */
975 while ((data_len = data_len - sg->length) &&
976 (mbuf = mbuf->next)) {
979 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
980 if (data_len > mbuf->data_len)
981 sg->length = mbuf->data_len;
983 sg->length = data_len;
987 if (is_decode(ses)) {
988 /* Digest verification case */
/* stash the expected digest in ctx so SEC can compare in-line */
991 rte_memcpy(old_digest, sym->auth.digest.data,
993 start_addr = rte_dpaa_mem_vtop(old_digest);
994 qm_sg_entry_set64(sg, start_addr);
995 sg->length = ses->digest_length;
996 in_sg->length += ses->digest_length;
1000 cpu_to_hw_sg(in_sg);
1006 * packet looks like:
1007 * |<----data_len------->|
1008 * |ip_header|ah_header|icv|payload|
/*
 * Contiguous-mbuf variant of build_auth_only_sg: single data segment, so
 * a fixed 4-entry SG allocation suffices (IV, data, and optional stashed
 * digest for verification).
 */
1013 static inline struct dpaa_sec_job *
1014 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1016 struct rte_crypto_sym_op *sym = op->sym;
1017 struct rte_mbuf *mbuf = sym->m_src;
1018 struct dpaa_sec_job *cf;
1019 struct dpaa_sec_op_ctx *ctx;
1020 struct qm_sg_entry *sg, *in_sg;
1021 rte_iova_t start_addr;
1022 uint8_t *old_digest;
1023 int data_len, data_offset;
1025 data_len = sym->auth.data.length;
1026 data_offset = sym->auth.data.offset;
/* bit-oriented algs: lengths arrive in bits, convert to bytes */
1028 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1029 ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1030 if ((data_len & 7) || (data_offset & 7)) {
1031 DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
1035 data_len = data_len >> 3;
1036 data_offset = data_offset >> 3;
1039 ctx = dpaa_sec_alloc_ctx(ses, 4);
1045 old_digest = ctx->digest;
1047 start_addr = rte_pktmbuf_iova(mbuf);
/* output: digest destination */
1050 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1051 sg->length = ses->digest_length;
1056 /* need to extend the input to a compound frame */
1057 in_sg->extension = 1;
1059 in_sg->length = data_len;
1060 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1063 if (ses->iv.length) {
1066 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1069 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1070 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1072 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1073 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1076 sg->length = ses->iv.length;
1078 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
1079 in_sg->length += sg->length;
1084 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1085 sg->offset = data_offset;
1086 sg->length = data_len;
1088 if (is_decode(ses)) {
1089 /* Digest verification case */
1091 /* hash result or digest, save digest first */
1092 rte_memcpy(old_digest, sym->auth.digest.data,
1093 ses->digest_length);
1094 /* let's check digest by hw */
1095 start_addr = rte_dpaa_mem_vtop(old_digest);
1097 qm_sg_entry_set64(sg, start_addr);
1098 sg->length = ses->digest_length;
1099 in_sg->length += ses->digest_length;
1103 cpu_to_hw_sg(in_sg);
/*
 * Build the SEC job for a cipher-only op over scatter-gather mbufs.
 * Output SG chain covers the destination data run; input SG chain is the
 * IV followed by the source data run.
 */
1108 static inline struct dpaa_sec_job *
1109 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1111 struct rte_crypto_sym_op *sym = op->sym;
1112 struct dpaa_sec_job *cf;
1113 struct dpaa_sec_op_ctx *ctx;
1114 struct qm_sg_entry *sg, *out_sg, *in_sg;
1115 struct rte_mbuf *mbuf;
1117 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1119 int data_len, data_offset;
1121 data_len = sym->cipher.data.length;
1122 data_offset = sym->cipher.data.offset;
/* SNOW UEA2 / ZUC EEA3: bit lengths, must be whole bytes */
1124 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1125 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1126 if ((data_len & 7) || (data_offset & 7)) {
1127 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1131 data_len = data_len >> 3;
1132 data_offset = data_offset >> 3;
/* out-of-place needs segs for both chains; in-place doubles m_src */
1137 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1140 req_segs = mbuf->nb_segs * 2 + 3;
1142 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1143 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
1148 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1156 out_sg = &cf->sg[0];
1157 out_sg->extension = 1;
1158 out_sg->length = data_len;
1159 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1160 cpu_to_hw_sg(out_sg);
1164 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1165 sg->length = mbuf->data_len - data_offset;
1166 sg->offset = data_offset;
1168 /* Successive segs */
1173 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1174 sg->length = mbuf->data_len;
/* input compound entry: IV + payload */
1183 in_sg->extension = 1;
1185 in_sg->length = data_len + ses->iv.length;
1188 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1189 cpu_to_hw_sg(in_sg);
1192 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1193 sg->length = ses->iv.length;
1198 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1199 sg->length = mbuf->data_len - data_offset;
1200 sg->offset = data_offset;
1202 /* Successive segs */
1207 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1208 sg->length = mbuf->data_len;
/*
 * Contiguous-mbuf cipher-only job: output entry points straight at the
 * destination data; input is a 2-entry SG of IV then source data. The
 * in-place case reuses the source address as destination.
 */
1217 static inline struct dpaa_sec_job *
1218 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1220 struct rte_crypto_sym_op *sym = op->sym;
1221 struct dpaa_sec_job *cf;
1222 struct dpaa_sec_op_ctx *ctx;
1223 struct qm_sg_entry *sg;
1224 rte_iova_t src_start_addr, dst_start_addr;
1225 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1227 int data_len, data_offset;
1229 data_len = sym->cipher.data.length;
1230 data_offset = sym->cipher.data.offset;
1232 if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1233 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1234 if ((data_len & 7) || (data_offset & 7)) {
1235 DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1239 data_len = data_len >> 3;
1240 data_offset = data_offset >> 3;
1243 ctx = dpaa_sec_alloc_ctx(ses, 4);
1250 src_start_addr = rte_pktmbuf_iova(sym->m_src);
/* in-place when no m_dst is supplied */
1253 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1255 dst_start_addr = src_start_addr;
1259 qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1260 sg->length = data_len + ses->iv.length;
1266 /* need to extend the input to a compound frame */
1269 sg->length = data_len + ses->iv.length;
1270 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1274 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1275 sg->length = ses->iv.length;
1279 qm_sg_entry_set64(sg, src_start_addr + data_offset);
1280 sg->length = data_len;
/*
 * Build the SEC job for an AEAD (GCM) op over scatter-gather mbufs.
 * Output chain: ciphertext run (+ digest entry when encoding). Input
 * chain: IV, optional AAD, data run, and the stashed expected digest
 * when decoding.
 */
1287 static inline struct dpaa_sec_job *
1288 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1290 struct rte_crypto_sym_op *sym = op->sym;
1291 struct dpaa_sec_job *cf;
1292 struct dpaa_sec_op_ctx *ctx;
1293 struct qm_sg_entry *sg, *out_sg, *in_sg;
1294 struct rte_mbuf *mbuf;
1296 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1301 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1304 req_segs = mbuf->nb_segs * 2 + 4;
/* AAD, if present, consumes one extra input entry */
1307 if (ses->auth_only_len)
1310 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1311 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1316 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1323 rte_prefetch0(cf->sg);
1326 out_sg = &cf->sg[0];
1327 out_sg->extension = 1;
/* encode emits ciphertext + tag; decode emits plaintext only */
1329 out_sg->length = sym->aead.data.length + ses->digest_length;
1331 out_sg->length = sym->aead.data.length;
1333 /* output sg entries */
1335 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1336 cpu_to_hw_sg(out_sg);
1339 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1340 sg->length = mbuf->data_len - sym->aead.data.offset;
1341 sg->offset = sym->aead.data.offset;
1343 /* Successive segs */
1348 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1349 sg->length = mbuf->data_len;
1352 sg->length -= ses->digest_length;
1354 if (is_encode(ses)) {
1356 /* set auth output */
1358 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1359 sg->length = ses->digest_length;
1367 in_sg->extension = 1;
/* decode input additionally carries the tag to verify */
1370 in_sg->length = ses->iv.length + sym->aead.data.length
1371 + ses->auth_only_len;
1373 in_sg->length = ses->iv.length + sym->aead.data.length
1374 + ses->auth_only_len + ses->digest_length;
1376 /* input sg entries */
1378 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1379 cpu_to_hw_sg(in_sg);
1382 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1383 sg->length = ses->iv.length;
1386 /* 2nd seg auth only */
1387 if (ses->auth_only_len) {
1389 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
1390 sg->length = ses->auth_only_len;
1396 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1397 sg->length = mbuf->data_len - sym->aead.data.offset;
1398 sg->offset = sym->aead.data.offset;
1400 /* Successive segs */
1405 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1406 sg->length = mbuf->data_len;
1410 if (is_decode(ses)) {
/* stash expected tag in ctx; SEC verifies it in-line */
1413 memcpy(ctx->digest, sym->aead.digest.data,
1414 ses->digest_length);
1415 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1416 sg->length = ses->digest_length;
/*
 * Contiguous-mbuf AEAD (GCM) job with a fixed 7-entry SG table.
 * Input compound: IV, optional AAD, data (+ stashed expected tag when
 * decoding). Output compound: destination data (+ tag when encoding).
 */
1424 static inline struct dpaa_sec_job *
1425 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1427 struct rte_crypto_sym_op *sym = op->sym;
1428 struct dpaa_sec_job *cf;
1429 struct dpaa_sec_op_ctx *ctx;
1430 struct qm_sg_entry *sg;
1431 uint32_t length = 0;
1432 rte_iova_t src_start_addr, dst_start_addr;
1433 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1436 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
/* in-place when no m_dst is supplied */
1439 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1441 dst_start_addr = src_start_addr;
1443 ctx = dpaa_sec_alloc_ctx(ses, 7);
1451 rte_prefetch0(cf->sg);
1453 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1454 if (is_encode(ses)) {
1455 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1456 sg->length = ses->iv.length;
1457 length += sg->length;
1461 if (ses->auth_only_len) {
1462 qm_sg_entry_set64(sg,
1463 rte_dpaa_mem_vtop(sym->aead.aad.data));
1464 sg->length = ses->auth_only_len;
1465 length += sg->length;
1469 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1470 sg->length = sym->aead.data.length;
1471 length += sg->length;
/* decode path: same prefix, plus the expected tag for verification */
1475 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1476 sg->length = ses->iv.length;
1477 length += sg->length;
1481 if (ses->auth_only_len) {
1482 qm_sg_entry_set64(sg,
1483 rte_dpaa_mem_vtop(sym->aead.aad.data));
1484 sg->length = ses->auth_only_len;
1485 length += sg->length;
1489 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1490 sg->length = sym->aead.data.length;
1491 length += sg->length;
1494 memcpy(ctx->digest, sym->aead.digest.data,
1495 ses->digest_length);
1498 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1499 sg->length = ses->digest_length;
1500 length += sg->length;
1504 /* input compound frame */
1505 cf->sg[1].length = length;
1506 cf->sg[1].extension = 1;
1507 cf->sg[1].final = 1;
1508 cpu_to_hw_sg(&cf->sg[1]);
1512 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1513 qm_sg_entry_set64(sg,
1514 dst_start_addr + sym->aead.data.offset);
1515 sg->length = sym->aead.data.length;
1516 length = sg->length;
1517 if (is_encode(ses)) {
1519 /* set auth output */
1521 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1522 sg->length = ses->digest_length;
1523 length += sg->length;
1528 /* output compound frame */
1529 cf->sg[0].length = length;
1530 cf->sg[0].extension = 1;
1531 cpu_to_hw_sg(&cf->sg[0]);
/*
 * Build a SEC compound-frame job for chained cipher+auth over
 * scatter-gather (multi-segment) mbufs.
 *
 * Output chain (cf->sg[0]): one entry per destination segment covering
 * sym->auth.data; the last segment's length is trimmed by digest_length,
 * and for encrypt an extra entry receives the generated digest.
 * Input chain (in_sg): IV, then one entry per source segment, and for
 * decrypt a final entry pointing at ctx->digest (copy of the received ICV).
 *
 * Rejects requests whose segment count exceeds MAX_SG_ENTRIES.
 * NOTE(review): elided listing — mbuf-walk loops and sg-advance statements
 * between the visible lines are not shown.
 */
1536 static inline struct dpaa_sec_job *
1537 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1539 struct rte_crypto_sym_op *sym = op->sym;
1540 struct dpaa_sec_job *cf;
1541 struct dpaa_sec_op_ctx *ctx;
1542 struct qm_sg_entry *sg, *out_sg, *in_sg;
1543 struct rte_mbuf *mbuf;
1545 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* Out-of-place needs entries for both src and dst chains; in-place 2x. */
1550 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1553 req_segs = mbuf->nb_segs * 2 + 4;
1556 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1557 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1562 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1569 rte_prefetch0(cf->sg);
/* Output frame: auth span plus digest when encrypting. */
1572 out_sg = &cf->sg[0];
1573 out_sg->extension = 1;
1575 out_sg->length = sym->auth.data.length + ses->digest_length;
1577 out_sg->length = sym->auth.data.length;
1579 /* output sg entries */
1581 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1582 cpu_to_hw_sg(out_sg);
/* 1st seg starts at auth.data.offset within the first mbuf. */
1585 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1586 sg->length = mbuf->data_len - sym->auth.data.offset;
1587 sg->offset = sym->auth.data.offset;
1589 /* Successive segs */
1594 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1595 sg->length = mbuf->data_len;
/* Last output seg excludes the trailing digest bytes. */
1598 sg->length -= ses->digest_length;
1600 if (is_encode(ses)) {
1602 /* set auth output */
1604 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1605 sg->length = ses->digest_length;
/* Input frame: IV + auth span (+ received digest when decrypting). */
1613 in_sg->extension = 1;
1616 in_sg->length = ses->iv.length + sym->auth.data.length;
1618 in_sg->length = ses->iv.length + sym->auth.data.length
1619 + ses->digest_length;
1621 /* input sg entries */
1623 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1624 cpu_to_hw_sg(in_sg);
/* 1st seg IV */
1627 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1628 sg->length = ses->iv.length;
1633 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1634 sg->length = mbuf->data_len - sym->auth.data.offset;
1635 sg->offset = sym->auth.data.offset;
1637 /* Successive segs */
1642 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1643 sg->length = mbuf->data_len;
1647 sg->length -= ses->digest_length;
1648 if (is_decode(ses)) {
/* Stash the received ICV in ctx->digest so SEC can verify it. */
1651 memcpy(ctx->digest, sym->auth.digest.data,
1652 ses->digest_length);
1653 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1654 sg->length = ses->digest_length;
/*
 * Build a SEC compound-frame job for chained cipher+auth on contiguous
 * (single-segment) mbufs.
 *
 * Input chain (cf->sg[1]): IV + auth span (src_start_addr +
 * auth.data.offset, auth.data.length bytes); for decrypt the received
 * digest is copied into ctx->digest and appended so SEC verifies the ICV.
 * Output chain (cf->sg[0]): cipher span at the destination plus, for
 * encrypt, the generated digest at auth.digest.phys_addr.
 *
 * Uses a fixed 7-entry SG context from dpaa_sec_alloc_ctx().
 * NOTE(review): elided listing — sg-advance/cpu_to_hw_sg statements and
 * the return are not visible here.
 */
1662 static inline struct dpaa_sec_job *
1663 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1665 struct rte_crypto_sym_op *sym = op->sym;
1666 struct dpaa_sec_job *cf;
1667 struct dpaa_sec_op_ctx *ctx;
1668 struct qm_sg_entry *sg;
1669 rte_iova_t src_start_addr, dst_start_addr;
1670 uint32_t length = 0;
1671 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* dst falls back to src for in-place operation. */
1674 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1676 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1678 dst_start_addr = src_start_addr;
1680 ctx = dpaa_sec_alloc_ctx(ses, 7);
1688 rte_prefetch0(cf->sg);
1690 qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1691 if (is_encode(ses)) {
/* Encrypt input = IV + auth span. */
1692 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1693 sg->length = ses->iv.length;
1694 length += sg->length;
1698 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1699 sg->length = sym->auth.data.length;
1700 length += sg->length;
/* Decrypt input = IV + auth span + expected digest. */
1704 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1705 sg->length = ses->iv.length;
1706 length += sg->length;
1711 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1712 sg->length = sym->auth.data.length;
1713 length += sg->length;
1716 memcpy(ctx->digest, sym->auth.digest.data,
1717 ses->digest_length);
1720 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1721 sg->length = ses->digest_length;
1722 length += sg->length;
1726 /* input compound frame */
1727 cf->sg[1].length = length;
1728 cf->sg[1].extension = 1;
1729 cf->sg[1].final = 1;
1730 cpu_to_hw_sg(&cf->sg[1]);
/* Output compound frame. */
1734 qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1735 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1736 sg->length = sym->cipher.data.length;
1737 length = sg->length;
1738 if (is_encode(ses)) {
1740 /* set auth output */
1742 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1743 sg->length = ses->digest_length;
1744 length += sg->length;
1749 /* output compound frame */
1750 cf->sg[0].length = length;
1751 cf->sg[0].extension = 1;
1752 cpu_to_hw_sg(&cf->sg[0]);
1757 #ifdef RTE_LIB_SECURITY
/*
 * Build a SEC job for lookaside-protocol (e.g. IPsec) offload on
 * contiguous mbufs: a simple 2-entry context where the input entry
 * covers the whole source packet (pkt_len) and the output entry covers
 * the remaining destination buffer space (buf_len - data_off), since
 * protocol offload may grow/shrink the packet.
 *
 * Clears RTE_PTYPE_L4_MASK on m_src because the L4 type recorded at Rx
 * is no longer valid after SEC rewrites the packet.
 * NOTE(review): elided listing — alloc-failure check, sg assignments
 * between entries and the return are not visible here.
 */
1758 static inline struct dpaa_sec_job *
1759 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1761 struct rte_crypto_sym_op *sym = op->sym;
1762 struct dpaa_sec_job *cf;
1763 struct dpaa_sec_op_ctx *ctx;
1764 struct qm_sg_entry *sg;
1765 phys_addr_t src_start_addr, dst_start_addr;
1767 ctx = dpaa_sec_alloc_ctx(ses, 2);
1773 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1776 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1778 dst_start_addr = src_start_addr;
/* Input: entire source packet. */
1782 qm_sg_entry_set64(sg, src_start_addr);
1783 sg->length = sym->m_src->pkt_len;
1787 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
/* Output: full writable region of the destination buffer. */
1790 qm_sg_entry_set64(sg, dst_start_addr);
1791 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
/*
 * Build a SEC job for lookaside-protocol offload over scatter-gather
 * (multi-segment) mbufs.
 *
 * Output chain (cf->sg[0] -> cf->sg[2]...): one entry per destination
 * segment; the final segment exposes its full remaining buffer space
 * (buf_len - data_off) to absorb protocol-added bytes.
 * Input chain (in_sg): one entry per source segment covering data_len.
 *
 * Rejects requests whose segment count exceeds MAX_SG_ENTRIES, and
 * clears RTE_PTYPE_L4_MASK on m_src (L4 type stale after SEC rewrite).
 * NOTE(review): elided listing — mbuf-walk bodies, cpu_to_hw_sg calls
 * between entries and the return are not visible here.
 */
1797 static inline struct dpaa_sec_job *
1798 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1800 struct rte_crypto_sym_op *sym = op->sym;
1801 struct dpaa_sec_job *cf;
1802 struct dpaa_sec_op_ctx *ctx;
1803 struct qm_sg_entry *sg, *out_sg, *in_sg;
1804 struct rte_mbuf *mbuf;
1806 uint32_t in_len = 0, out_len = 0;
1813 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1814 if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1815 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1820 ctx = dpaa_sec_alloc_ctx(ses, req_segs);
/* Output frame: extension entry pointing at the per-segment chain. */
1826 out_sg = &cf->sg[0];
1827 out_sg->extension = 1;
1828 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1832 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1835 /* Successive segs */
1836 while (mbuf->next) {
1837 sg->length = mbuf->data_len;
1838 out_len += sg->length;
1842 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
/* Last out seg: whole remaining buffer, not just data_len. */
1845 sg->length = mbuf->buf_len - mbuf->data_off;
1846 out_len += sg->length;
1850 out_sg->length = out_len;
1851 cpu_to_hw_sg(out_sg);
/* Input frame: walk the source mbuf chain. */
1856 in_sg->extension = 1;
1858 in_len = mbuf->data_len;
1861 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1864 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1865 sg->length = mbuf->data_len;
1868 /* Successive segs */
1873 qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1874 sg->length = mbuf->data_len;
1876 in_len += sg->length;
1882 in_sg->length = in_len;
1883 cpu_to_hw_sg(in_sg);
1885 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
/*
 * Enqueue burst of crypto ops to the SEC hardware via QMan.
 *
 * Per DPAA_SEC_BURST-sized batch: resolve the session (sym or security),
 * lazily attach the per-lcore session queue, pick the contiguous or SG
 * descriptor builder from ses->ctxt, fill a compound FD per op, then push
 * all FDs with qman_enqueue_multi_fq().  On any per-op failure the batch
 * is truncated at the failing op (frames_to_send = loop) and only the
 * preceding ops are sent; the remainder count as tx_errs.
 *
 * Also handles order-restoration DCA flags for ops dequeued from held
 * DQRR entries, per-packet auth_only_len override via fd->cmd, and
 * per-packet PDCP HFN override when ses->pdcp.hfn_ovd is set.
 *
 * NOTE(review): elided listing — loop headers, break/goto statements and
 * several closing braces between visible lines are not shown.
 */
1892 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1895 /* Function to transmit the frames to given device and queuepair */
1897 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1898 uint16_t num_tx = 0;
1899 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1900 uint32_t frames_to_send;
1901 struct rte_crypto_op *op;
1902 struct dpaa_sec_job *cf;
1903 dpaa_sec_session *ses;
1904 uint16_t auth_hdr_len, auth_tail_len;
1905 uint32_t index, flags[DPAA_SEC_BURST] = {0};
1906 struct qman_fq *inq[DPAA_SEC_BURST];
/* Lazily affine a QMan portal to this lcore before any enqueue. */
1908 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1909 if (rte_dpaa_portal_init((void *)0)) {
1910 DPAA_SEC_ERR("Failure in affining portal");
1916 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1917 DPAA_SEC_BURST : nb_ops;
1918 for (loop = 0; loop < frames_to_send; loop++) {
/* dpaa_seqn != 0: op came from a held DQRR entry; request DCA
 * consumption of that entry on enqueue to preserve order.
 */
1920 if (*dpaa_seqn(op->sym->m_src) != 0) {
1921 index = *dpaa_seqn(op->sym->m_src) - 1;
1922 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1923 /* QM_EQCR_DCA_IDXMASK = 0x0f */
1924 flags[loop] = ((index & 0x0f) << 8);
1925 flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1926 DPAA_PER_LCORE_DQRR_SIZE--;
1927 DPAA_PER_LCORE_DQRR_HELD &=
/* Resolve the driver session for this op. */
1932 switch (op->sess_type) {
1933 case RTE_CRYPTO_OP_WITH_SESSION:
1934 ses = (dpaa_sec_session *)
1935 get_sym_session_private_data(
1937 dpaa_cryptodev_driver_id);
1939 #ifdef RTE_LIB_SECURITY
1940 case RTE_CRYPTO_OP_SECURITY_SESSION:
1941 ses = (dpaa_sec_session *)
1942 get_sec_session_private_data(
1943 op->sym->sec_session);
1948 "sessionless crypto op not supported");
1949 frames_to_send = loop;
1955 DPAA_SEC_DP_ERR("session not available");
1956 frames_to_send = loop;
/* Session must be bound to THIS qp on this lcore; attach lazily. */
1961 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1962 if (dpaa_sec_attach_sess_q(qp, ses)) {
1963 frames_to_send = loop;
1967 } else if (unlikely(ses->qp[rte_lcore_id() %
1968 MAX_DPAA_CORES] != qp)) {
1969 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1971 ses->qp[rte_lcore_id() %
1972 MAX_DPAA_CORES], qp);
1973 frames_to_send = loop;
1978 auth_hdr_len = op->sym->auth.data.length -
1979 op->sym->cipher.data.length;
/* Contiguous src (and dst, if any): use the flat builders. */
1982 if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1983 ((op->sym->m_dst == NULL) ||
1984 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1985 switch (ses->ctxt) {
1986 #ifdef RTE_LIB_SECURITY
1988 case DPAA_SEC_IPSEC:
1989 cf = build_proto(op, ses);
1993 cf = build_auth_only(op, ses);
1995 case DPAA_SEC_CIPHER:
1996 cf = build_cipher_only(op, ses);
1999 cf = build_cipher_auth_gcm(op, ses);
2000 auth_hdr_len = ses->auth_only_len;
2002 case DPAA_SEC_CIPHER_HASH:
2004 op->sym->cipher.data.offset
2005 - op->sym->auth.data.offset;
2007 op->sym->auth.data.length
2008 - op->sym->cipher.data.length
2010 cf = build_cipher_auth(op, ses);
2013 DPAA_SEC_DP_ERR("not supported ops");
2014 frames_to_send = loop;
/* Multi-segment path: scatter-gather builders. */
2019 switch (ses->ctxt) {
2020 #ifdef RTE_LIB_SECURITY
2022 case DPAA_SEC_IPSEC:
2023 cf = build_proto_sg(op, ses);
2027 cf = build_auth_only_sg(op, ses);
2029 case DPAA_SEC_CIPHER:
2030 cf = build_cipher_only_sg(op, ses);
2033 cf = build_cipher_auth_gcm_sg(op, ses);
2034 auth_hdr_len = ses->auth_only_len;
2036 case DPAA_SEC_CIPHER_HASH:
2038 op->sym->cipher.data.offset
2039 - op->sym->auth.data.offset;
2041 op->sym->auth.data.length
2042 - op->sym->cipher.data.length
2044 cf = build_cipher_auth_sg(op, ses);
2047 DPAA_SEC_DP_ERR("not supported ops");
2048 frames_to_send = loop;
2053 if (unlikely(!cf)) {
2054 frames_to_send = loop;
/* Fill the compound FD pointing at the job's 2-entry SG table. */
2060 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
2061 fd->opaque_addr = 0;
2063 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
2064 fd->_format1 = qm_fd_compound;
2065 fd->length29 = 2 * sizeof(struct qm_sg_entry);
2067 /* Auth_only_len is set as 0 in descriptor and it is
2068 * overwritten here in the fd.cmd which will update
2071 if (auth_hdr_len || auth_tail_len) {
2072 fd->cmd = 0x80000000;
2074 ((auth_tail_len << 16) | auth_hdr_len);
2077 #ifdef RTE_LIB_SECURITY
2078 /* In case of PDCP, per packet HFN is stored in
2079 * mbuf priv after sym_op.
2081 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
2082 fd->cmd = 0x80000000 |
2083 *((uint32_t *)((uint8_t *)op +
2084 ses->pdcp.hfn_ovd_offset));
2085 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
2086 *((uint32_t *)((uint8_t *)op +
2087 ses->pdcp.hfn_ovd_offset)),
/* Push the prepared FDs; retry until the batch is fully accepted. */
2094 while (loop < frames_to_send) {
2095 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
2096 &flags[loop], frames_to_send - loop);
2098 nb_ops -= frames_to_send;
2099 num_tx += frames_to_send;
2102 dpaa_qp->tx_pkts += num_tx;
2103 dpaa_qp->tx_errs += nb_ops - num_tx;
/*
 * Dequeue completed crypto ops from the queue pair.  Affines a QMan
 * portal to this lcore if not done yet, pulls up to nb_ops completions
 * via dpaa_sec_deq(), and updates rx_pkts/rx_errs stats.
 */
2109 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
2113 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
2115 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2116 if (rte_dpaa_portal_init((void *)0)) {
2117 DPAA_SEC_ERR("Failure in affining portal");
2122 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
2124 dpaa_qp->rx_pkts += num_rx;
2125 dpaa_qp->rx_errs += nb_ops - num_rx;
2127 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
2132 /** Release queue pair */
/*
 * Validate qp_id against max_nb_queue_pairs, free the per-qp context
 * mempool created in dpaa_sec_queue_pair_setup(), and clear the qp slot
 * in dev->data.  The qp struct itself lives inside internals->qps[] and
 * is not freed.
 */
2134 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
2137 struct dpaa_sec_dev_private *internals;
2138 struct dpaa_sec_qp *qp = NULL;
2140 PMD_INIT_FUNC_TRACE();
2142 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
2144 internals = dev->data->dev_private;
2145 if (qp_id >= internals->max_nb_queue_pairs) {
2146 DPAA_SEC_ERR("Max supported qpid %d",
2147 internals->max_nb_queue_pairs);
2151 qp = &internals->qps[qp_id];
2152 rte_mempool_free(qp->ctx_pool);
2153 qp->internals = NULL;
2154 dev->data->queue_pairs[qp_id] = NULL;
2159 /** Setup a queue pair */
/*
 * Bind qp_id to the statically-allocated qp in internals->qps[] and
 * create (once) its per-qp job-context mempool, named
 * "ctx_pool_d<dev>_qp<id>".  qp_conf and socket_id are unused.
 * Reuses an existing ctx_pool if the qp was set up before.
 */
2161 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2162 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2163 __rte_unused int socket_id)
2165 struct dpaa_sec_dev_private *internals;
2166 struct dpaa_sec_qp *qp = NULL;
2169 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
2171 internals = dev->data->dev_private;
2172 if (qp_id >= internals->max_nb_queue_pairs) {
2173 DPAA_SEC_ERR("Max supported qpid %d",
2174 internals->max_nb_queue_pairs);
2178 qp = &internals->qps[qp_id];
2179 qp->internals = internals;
2180 snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
2181 dev->data->dev_id, qp_id);
2182 if (!qp->ctx_pool) {
2183 qp->ctx_pool = rte_mempool_create((const char *)str,
2186 CTX_POOL_CACHE_SIZE, 0,
2187 NULL, NULL, NULL, NULL,
2189 if (!qp->ctx_pool) {
2190 DPAA_SEC_ERR("%s create failed\n", str);
2194 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2195 dev->data->dev_id, qp_id);
2196 dev->data->queue_pairs[qp_id] = qp;
2201 /** Returns the size of session structure */
2203 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2205 PMD_INIT_FUNC_TRACE();
2207 return sizeof(dpaa_sec_session);
/*
 * Configure a session for cipher-only operation: copy IV params and key
 * into the session, then map the rte_crypto cipher algo to the SEC
 * OP_ALG_ALGSEL_*/OP_ALG_AAI_* descriptor constants.  session->dir is
 * derived from the xform's encrypt/decrypt op.  Key buffer is
 * rte_zmalloc'd (freed later by free_session_data()).
 * NOTE(review): elided listing — return statements, break statements and
 * closing braces between visible lines are not shown.
 */
2211 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2212 struct rte_crypto_sym_xform *xform,
2213 dpaa_sec_session *session)
2215 session->ctxt = DPAA_SEC_CIPHER;
2216 session->cipher_alg = xform->cipher.algo;
2217 session->iv.length = xform->cipher.iv.length;
2218 session->iv.offset = xform->cipher.iv.offset;
2219 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2220 RTE_CACHE_LINE_SIZE);
/* NULL data is only an error when a key was actually requested. */
2221 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2222 DPAA_SEC_ERR("No Memory for cipher key");
2225 session->cipher_key.length = xform->cipher.key.length;
2227 memcpy(session->cipher_key.data, xform->cipher.key.data,
2228 xform->cipher.key.length);
2229 switch (xform->cipher.algo) {
2230 case RTE_CRYPTO_CIPHER_AES_CBC:
2231 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2232 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2234 case RTE_CRYPTO_CIPHER_DES_CBC:
2235 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2236 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2238 case RTE_CRYPTO_CIPHER_3DES_CBC:
2239 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2240 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2242 case RTE_CRYPTO_CIPHER_AES_CTR:
2243 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2244 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2246 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2247 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2249 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2250 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2253 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2254 xform->cipher.algo);
2257 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/*
 * Configure a session for auth-only operation: copy the auth key (if
 * any) and digest length, pick up the auth IV when no cipher is chained
 * (cipher_alg == NULL, i.e. SNOW3G/ZUC style auth), and map the
 * rte_crypto auth algo to SEC OP_ALG_ALGSEL_*/OP_ALG_AAI_* constants.
 * session->dir follows GENERATE vs VERIFY.
 * NOTE(review): elided listing — break statements, error returns and
 * closing braces between visible lines are not shown.
 */
2264 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2265 struct rte_crypto_sym_xform *xform,
2266 dpaa_sec_session *session)
2268 session->ctxt = DPAA_SEC_AUTH;
2269 session->auth_alg = xform->auth.algo;
2270 session->auth_key.length = xform->auth.key.length;
2271 if (xform->auth.key.length) {
2272 session->auth_key.data =
2273 rte_zmalloc(NULL, xform->auth.key.length,
2274 RTE_CACHE_LINE_SIZE);
2275 if (session->auth_key.data == NULL) {
2276 DPAA_SEC_ERR("No Memory for auth key");
2279 memcpy(session->auth_key.data, xform->auth.key.data,
2280 xform->auth.key.length);
2283 session->digest_length = xform->auth.digest_length;
/* Auth-only session: the IV (SNOW3G/ZUC) comes from the auth xform. */
2284 if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2285 session->iv.offset = xform->auth.iv.offset;
2286 session->iv.length = xform->auth.iv.length;
2289 switch (xform->auth.algo) {
2290 case RTE_CRYPTO_AUTH_SHA1:
2291 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2292 session->auth_key.algmode = OP_ALG_AAI_HASH;
2294 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2295 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2296 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2298 case RTE_CRYPTO_AUTH_MD5:
2299 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2300 session->auth_key.algmode = OP_ALG_AAI_HASH;
2302 case RTE_CRYPTO_AUTH_MD5_HMAC:
2303 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2304 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2306 case RTE_CRYPTO_AUTH_SHA224:
2307 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2308 session->auth_key.algmode = OP_ALG_AAI_HASH;
2310 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2311 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2312 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2314 case RTE_CRYPTO_AUTH_SHA256:
2315 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2316 session->auth_key.algmode = OP_ALG_AAI_HASH;
2318 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2319 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2320 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2322 case RTE_CRYPTO_AUTH_SHA384:
2323 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2324 session->auth_key.algmode = OP_ALG_AAI_HASH;
2326 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2327 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2328 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2330 case RTE_CRYPTO_AUTH_SHA512:
2331 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2332 session->auth_key.algmode = OP_ALG_AAI_HASH;
2334 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2335 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2336 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2338 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2339 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2340 session->auth_key.algmode = OP_ALG_AAI_F9;
2342 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2343 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2344 session->auth_key.algmode = OP_ALG_AAI_F9;
2346 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2347 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2348 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2350 case RTE_CRYPTO_AUTH_AES_CMAC:
2351 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2352 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2355 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2360 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
/*
 * Configure a session for chained cipher+auth.  The xform chain order
 * (cipher-then-auth vs auth-then-cipher) was recorded by the caller in
 * session->auth_cipher_text; that flag selects which xform in the pair
 * supplies cipher vs auth parameters.  Both keys are copied into
 * rte_zmalloc'd buffers, then the auth and cipher algos are mapped to
 * SEC OP_ALG_ALGSEL_*/OP_ALG_AAI_* constants.
 * NOTE(review): elided listing — break statements, error returns and
 * closing braces between visible lines are not shown.
 */
2367 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2368 struct rte_crypto_sym_xform *xform,
2369 dpaa_sec_session *session)
2372 struct rte_crypto_cipher_xform *cipher_xform;
2373 struct rte_crypto_auth_xform *auth_xform;
2375 session->ctxt = DPAA_SEC_CIPHER_HASH;
2376 if (session->auth_cipher_text) {
2377 cipher_xform = &xform->cipher;
2378 auth_xform = &xform->next->auth;
2380 cipher_xform = &xform->next->cipher;
2381 auth_xform = &xform->auth;
2384 /* Set IV parameters */
2385 session->iv.offset = cipher_xform->iv.offset;
2386 session->iv.length = cipher_xform->iv.length;
2388 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2389 RTE_CACHE_LINE_SIZE);
2390 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2391 DPAA_SEC_ERR("No Memory for cipher key");
2394 session->cipher_key.length = cipher_xform->key.length;
2395 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2396 RTE_CACHE_LINE_SIZE);
2397 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2398 DPAA_SEC_ERR("No Memory for auth key");
2401 session->auth_key.length = auth_xform->key.length;
2402 memcpy(session->cipher_key.data, cipher_xform->key.data,
2403 cipher_xform->key.length);
2404 memcpy(session->auth_key.data, auth_xform->key.data,
2405 auth_xform->key.length);
2407 session->digest_length = auth_xform->digest_length;
2408 session->auth_alg = auth_xform->algo;
2410 switch (auth_xform->algo) {
2411 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2412 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2413 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2415 case RTE_CRYPTO_AUTH_MD5_HMAC:
2416 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2417 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2419 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2420 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2421 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2423 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2424 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2425 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2427 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2428 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2429 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2431 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2432 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2433 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2435 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2436 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2437 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
2439 case RTE_CRYPTO_AUTH_AES_CMAC:
2440 session->auth_key.alg = OP_ALG_ALGSEL_AES;
2441 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2444 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2449 session->cipher_alg = cipher_xform->algo;
2451 switch (cipher_xform->algo) {
2452 case RTE_CRYPTO_CIPHER_AES_CBC:
2453 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2454 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2456 case RTE_CRYPTO_CIPHER_DES_CBC:
2457 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2458 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2460 case RTE_CRYPTO_CIPHER_3DES_CBC:
2461 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2462 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2464 case RTE_CRYPTO_CIPHER_AES_CTR:
2465 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2466 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2469 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2470 cipher_xform->algo);
2473 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/*
 * Configure a session for AEAD operation (AES-GCM is the only algo
 * accepted): record IV, AAD length (stored as auth_only_len), digest
 * length, and copy the key into an rte_zmalloc'd buffer.  session->dir
 * follows the AEAD encrypt/decrypt op.
 * NOTE(review): elided listing — break/return statements and closing
 * braces between visible lines are not shown.
 */
2479 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2480 struct rte_crypto_sym_xform *xform,
2481 dpaa_sec_session *session)
2483 session->aead_alg = xform->aead.algo;
2484 session->ctxt = DPAA_SEC_AEAD;
2485 session->iv.length = xform->aead.iv.length;
2486 session->iv.offset = xform->aead.iv.offset;
2487 session->auth_only_len = xform->aead.aad_length;
2488 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2489 RTE_CACHE_LINE_SIZE);
2490 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2491 DPAA_SEC_ERR("No Memory for aead key\n");
2494 session->aead_key.length = xform->aead.key.length;
2495 session->digest_length = xform->aead.digest_length;
2497 memcpy(session->aead_key.data, xform->aead.key.data,
2498 xform->aead.key.length);
2500 switch (session->aead_alg) {
2501 case RTE_CRYPTO_AEAD_AES_GCM:
2502 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2503 session->aead_key.algmode = OP_ALG_AAI_GCM;
2506 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2510 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
/*
 * Claim a free SEC Rx frame queue from the device's inq[] pool: scan
 * inq_attach[] for an unused slot, mark it attached and hand it out.
 * Caller must hold internals->lock (see dpaa_sec_set_session_parameters).
 * Logs a warning when all RTE_DPAA_MAX_RX_QUEUE slots are in use.
 * NOTE(review): elided listing — the return statements are not visible.
 */
2516 static struct qman_fq *
2517 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2521 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2522 if (qi->inq_attach[i] == 0) {
2523 qi->inq_attach[i] = 1;
2527 DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
/*
 * Return a session's Rx frame queue to the device pool: find the
 * matching inq[] slot, retire the FQ (best-effort — a failure is only
 * logged) and clear its attach flag so dpaa_sec_attach_rxq() can reuse it.
 */
2533 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2537 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2538 if (&qi->inq[i] == fq) {
2539 if (qman_retire_fq(fq, NULL) != 0)
2540 DPAA_SEC_DEBUG("Queue is not retired\n");
2542 qi->inq_attach[i] = 0;
/*
 * Bind a session to a queue pair for the current lcore: record qp in
 * ses->qp[], build the shared descriptor (dpaa_sec_prep_cdb), affine a
 * QMan portal if needed, then init the per-lcore Rx FQ to deliver into
 * qp->outq with the session CDB as FQ context.
 * NOTE(review): elided listing — the return paths are not visible.
 */
2550 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2554 sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2555 ret = dpaa_sec_prep_cdb(sess);
2557 DPAA_SEC_ERR("Unable to prepare sec cdb");
2560 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2561 ret = rte_dpaa_portal_init((void *)0);
2563 DPAA_SEC_ERR("Failure in affining portal");
2567 ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2568 rte_dpaa_mem_vtop(&sess->cdb),
2569 qman_fq_fqid(&qp->outq));
2571 DPAA_SEC_ERR("Unable to init sec queue");
/*
 * Free all key material owned by the session (aead/auth/cipher keys)
 * and zero the whole struct so no key bytes linger in memory.
 */
2577 free_session_data(dpaa_sec_session *s)
2580 rte_free(s->aead_key.data);
2582 rte_free(s->auth_key.data);
2583 rte_free(s->cipher_key.data);
2585 memset(s, 0, sizeof(dpaa_sec_session));
/*
 * Parse an rte_crypto xform chain and initialize the driver session.
 *
 * Dispatch on the chain shape: cipher-only, auth-only, cipher->auth,
 * auth->cipher, or single AEAD xform.  For the chained cases, a NULL
 * cipher or NULL auth algo degrades the session to auth-only or
 * cipher-only respectively; the op direction on the cipher xform decides
 * auth_cipher_text.  On success, attach one Rx queue per DPAA core under
 * internals->lock; on any failure the session's key buffers are released
 * via free_session_data().
 * NOTE(review): elided listing — error-path gotos/returns and closing
 * braces between visible lines are not shown.
 */
2589 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2590 struct rte_crypto_sym_xform *xform, void *sess)
2592 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2593 dpaa_sec_session *session = sess;
2597 PMD_INIT_FUNC_TRACE();
2599 if (unlikely(sess == NULL)) {
2600 DPAA_SEC_ERR("invalid session struct");
2603 memset(session, 0, sizeof(dpaa_sec_session));
2605 /* Default IV length = 0 */
2606 session->iv.length = 0;
/* Cipher Only */
2609 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2610 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2611 ret = dpaa_sec_cipher_init(dev, xform, session);
2613 /* Authentication Only */
2614 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2615 xform->next == NULL) {
2616 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2617 session->ctxt = DPAA_SEC_AUTH;
2618 ret = dpaa_sec_auth_init(dev, xform, session);
2620 /* Cipher then Authenticate */
2621 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2622 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2623 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2624 session->auth_cipher_text = 1;
2625 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2626 ret = dpaa_sec_auth_init(dev, xform, session);
2627 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2628 ret = dpaa_sec_cipher_init(dev, xform, session);
2630 ret = dpaa_sec_chain_init(dev, xform, session);
2632 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2635 /* Authenticate then Cipher */
2636 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2637 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2638 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2639 session->auth_cipher_text = 0;
2640 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2641 ret = dpaa_sec_cipher_init(dev, xform, session);
2642 else if (xform->next->cipher.algo
2643 == RTE_CRYPTO_CIPHER_NULL)
2644 ret = dpaa_sec_auth_init(dev, xform, session);
2646 ret = dpaa_sec_chain_init(dev, xform, session);
2648 DPAA_SEC_ERR("Not supported: Auth then Cipher");
2652 /* AEAD operation for AES-GCM kind of Algorithms */
2653 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2654 xform->next == NULL) {
2655 ret = dpaa_sec_aead_init(dev, xform, session);
2658 DPAA_SEC_ERR("Invalid crypto type");
2662 DPAA_SEC_ERR("unable to init session");
/* Reserve one Rx queue per possible lcore, under the device lock. */
2666 rte_spinlock_lock(&internals->lock);
2667 for (i = 0; i < MAX_DPAA_CORES; i++) {
2668 session->inq[i] = dpaa_sec_attach_rxq(internals);
2669 if (session->inq[i] == NULL) {
2670 DPAA_SEC_ERR("unable to attach sec queue");
2671 rte_spinlock_unlock(&internals->lock);
2676 rte_spinlock_unlock(&internals->lock);
/* Error path: release any key buffers allocated by the init helpers. */
2681 free_session_data(session);
/*
 * cryptodev sym_session_configure op: take a private-data object from
 * the session mempool, fill it via dpaa_sec_set_session_parameters(),
 * and register it on the generic session under this driver's id.  The
 * object is returned to the mempool if parameter setup fails.
 */
2686 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2687 struct rte_crypto_sym_xform *xform,
2688 struct rte_cryptodev_sym_session *sess,
2689 struct rte_mempool *mempool)
2691 void *sess_private_data;
2694 PMD_INIT_FUNC_TRACE();
2696 if (rte_mempool_get(mempool, &sess_private_data)) {
2697 DPAA_SEC_ERR("Couldn't get object from session mempool");
2701 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2703 DPAA_SEC_ERR("failed to configure session parameters");
2705 /* Return session to mempool */
2706 rte_mempool_put(mempool, sess_private_data);
2710 set_sym_session_private_data(sess, dev->driver_id,
/*
 * Fully tear down a session: detach its per-core Rx queues back to the
 * device pool, free/zero its key material (free_session_data), and
 * return the private-data object to its originating mempool (recovered
 * via rte_mempool_from_obj).
 */
2718 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2720 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2721 struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2724 for (i = 0; i < MAX_DPAA_CORES; i++) {
2726 dpaa_sec_detach_rxq(qi, s->inq[i]);
2730 free_session_data(s);
2731 rte_mempool_put(sess_mp, (void *)s);
2734 /** Clear the memory of session so it doesn't leave key material behind */
/*
 * cryptodev sym_session_clear op: look up this driver's private data on
 * the generic session, release it via free_session_memory(), and unset
 * the per-driver pointer.  A session with no private data is a no-op.
 */
2736 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2737 struct rte_cryptodev_sym_session *sess)
2739 PMD_INIT_FUNC_TRACE();
2740 uint8_t index = dev->driver_id;
2741 void *sess_priv = get_sym_session_private_data(sess, index);
2742 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2745 free_session_memory(dev, s);
2746 set_sym_session_private_data(sess, index, NULL);
2750 #ifdef RTE_LIB_SECURITY
/*
 * Configure the AEAD part of an IPsec lookaside-protocol session.
 * Copies the key and digest length, selects the OP_PCL_IPSEC_AES_GCM8/
 * 12/16 protocol constant from the digest length (only GCM is
 * supported), and stores the 4-byte implicit salt from the IPsec xform
 * into the encap or decap PDB depending on session->dir.
 * NOTE(review): elided listing — return statements and closing braces
 * between visible lines are not shown.
 */
2752 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2753 struct rte_security_ipsec_xform *ipsec_xform,
2754 dpaa_sec_session *session)
2756 PMD_INIT_FUNC_TRACE();
2758 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2759 RTE_CACHE_LINE_SIZE);
2760 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2761 DPAA_SEC_ERR("No Memory for aead key");
2764 memcpy(session->aead_key.data, aead_xform->key.data,
2765 aead_xform->key.length);
2767 session->digest_length = aead_xform->digest_length;
2768 session->aead_key.length = aead_xform->key.length;
2770 switch (aead_xform->algo) {
2771 case RTE_CRYPTO_AEAD_AES_GCM:
/* Protocol descriptor variant is keyed by ICV length (8/12/16 B). */
2772 switch (session->digest_length) {
2774 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2777 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2780 session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2783 DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2784 session->digest_length);
/* GCM salt goes into the encap PDB for egress, decap for ingress. */
2787 if (session->dir == DIR_ENC) {
2788 memcpy(session->encap_pdb.gcm.salt,
2789 (uint8_t *)&(ipsec_xform->salt), 4);
2791 memcpy(session->decap_pdb.gcm.salt,
2792 (uint8_t *)&(ipsec_xform->salt), 4);
2794 session->aead_key.algmode = OP_ALG_AAI_GCM;
2795 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2798 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
/*
 * dpaa_sec_ipsec_proto_init() - populate a lookaside-IPsec session from the
 * supplied cipher and auth transforms.
 *
 * Copies the cipher/auth keys into @session (a NULL transform selects the
 * NULL cipher/auth algorithm) and maps the rte_crypto algorithm enums onto
 * the SEC protocol-descriptor codes (OP_PCL_IPSEC_*) plus the AAI algorithm
 * modes used later when the shared descriptor is built.
 * NOTE(review): some statements of this routine (error-path returns, break
 * statements) are not visible in this excerpt — confirm against full source.
 */
2806 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2807 struct rte_crypto_auth_xform *auth_xform,
2808 struct rte_security_ipsec_xform *ipsec_xform,
2809 dpaa_sec_session *session)
/* Cipher key copy; rte_zmalloc(0) may legitimately return NULL, hence
 * the combined NULL-and-length check below. */
2812 session->cipher_key.data = rte_zmalloc(NULL,
2813 cipher_xform->key.length,
2814 RTE_CACHE_LINE_SIZE);
2815 if (session->cipher_key.data == NULL &&
2816 cipher_xform->key.length > 0) {
2817 DPAA_SEC_ERR("No Memory for cipher key");
2821 session->cipher_key.length = cipher_xform->key.length;
2822 memcpy(session->cipher_key.data, cipher_xform->key.data,
2823 cipher_xform->key.length);
2824 session->cipher_alg = cipher_xform->algo;
/* No cipher transform given: run with the NULL cipher. */
2826 session->cipher_key.data = NULL;
2827 session->cipher_key.length = 0;
2828 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
/* Auth key copy, same NULL/zero-length convention as above. */
2832 session->auth_key.data = rte_zmalloc(NULL,
2833 auth_xform->key.length,
2834 RTE_CACHE_LINE_SIZE);
2835 if (session->auth_key.data == NULL &&
2836 auth_xform->key.length > 0) {
2837 DPAA_SEC_ERR("No Memory for auth key");
2840 session->auth_key.length = auth_xform->key.length;
2841 memcpy(session->auth_key.data, auth_xform->key.data,
2842 auth_xform->key.length);
2843 session->auth_alg = auth_xform->algo;
2844 session->digest_length = auth_xform->digest_length;
/* No auth transform given: run with NULL authentication. */
2846 session->auth_key.data = NULL;
2847 session->auth_key.length = 0;
2848 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
/* Map auth algorithm to the SEC IPsec-protocol code + AAI mode. */
2851 switch (session->auth_alg) {
2852 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2853 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2854 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2856 case RTE_CRYPTO_AUTH_MD5_HMAC:
2857 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2858 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2860 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2861 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2862 session->auth_key.algmode = OP_ALG_AAI_HMAC;
/* The SEC protocol descriptor always truncates SHA-256 to 128 bits;
 * warn when the caller asked for a different digest length. */
2863 if (session->digest_length != 16)
2865 "+++Using sha256-hmac truncated len is non-standard,"
2866 "it will not work with lookaside proto");
2868 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2869 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2870 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2872 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2873 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2874 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2876 case RTE_CRYPTO_AUTH_AES_CMAC:
2877 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2878 session->auth_key.algmode = OP_ALG_AAI_CMAC;
2880 case RTE_CRYPTO_AUTH_NULL:
2881 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2883 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2884 session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2885 session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
/* Algorithms the SEC IPsec protocol path does not support. */
2887 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2888 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2889 case RTE_CRYPTO_AUTH_SHA1:
2890 case RTE_CRYPTO_AUTH_SHA256:
2891 case RTE_CRYPTO_AUTH_SHA512:
2892 case RTE_CRYPTO_AUTH_SHA224:
2893 case RTE_CRYPTO_AUTH_SHA384:
2894 case RTE_CRYPTO_AUTH_MD5:
2895 case RTE_CRYPTO_AUTH_AES_GMAC:
2896 case RTE_CRYPTO_AUTH_KASUMI_F9:
2897 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2898 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2899 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2903 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
/* Map cipher algorithm to the SEC IPsec-protocol code + AAI mode. */
2908 switch (session->cipher_alg) {
2909 case RTE_CRYPTO_CIPHER_AES_CBC:
2910 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2911 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2913 case RTE_CRYPTO_CIPHER_DES_CBC:
2914 session->cipher_key.alg = OP_PCL_IPSEC_DES;
2915 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2917 case RTE_CRYPTO_CIPHER_3DES_CBC:
2918 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2919 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2921 case RTE_CRYPTO_CIPHER_AES_CTR:
2922 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2923 session->cipher_key.algmode = OP_ALG_AAI_CTR;
/* CTR mode needs the RFC 3686 nonce (SA salt) and an initial
 * counter value of 1 programmed into the encap/decap PDB. */
2924 if (session->dir == DIR_ENC) {
2925 session->encap_pdb.ctr.ctr_initial = 0x00000001;
2926 session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2928 session->decap_pdb.ctr.ctr_initial = 0x00000001;
2929 session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2932 case RTE_CRYPTO_CIPHER_NULL:
2933 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
/* Ciphers the SEC IPsec protocol path does not support. */
2935 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2936 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2937 case RTE_CRYPTO_CIPHER_3DES_ECB:
2938 case RTE_CRYPTO_CIPHER_AES_ECB:
2939 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2940 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2941 session->cipher_alg);
2944 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2945 session->cipher_alg);
/*
 * dpaa_sec_set_ipsec_session() - configure an IPsec security session.
 *
 * Resets the session, derives direction from the SA, dispatches key/algo
 * setup to dpaa_sec_ipsec_proto_init() (cipher/auth chains) or
 * dpaa_sec_ipsec_aead_init() (AEAD), pre-builds the tunnel IP header and
 * the SEC encap/decap PDB, then attaches one SEC Rx queue per core.
 * NOTE(review): several statements (returns, else branches, gotos) are
 * elided in this excerpt — confirm against full source.
 */
2953 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2954 struct rte_security_session_conf *conf,
2957 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2958 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2959 struct rte_crypto_auth_xform *auth_xform = NULL;
2960 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2961 struct rte_crypto_aead_xform *aead_xform = NULL;
2962 dpaa_sec_session *session = (dpaa_sec_session *)sess;
2966 PMD_INIT_FUNC_TRACE();
2968 memset(session, 0, sizeof(dpaa_sec_session));
2969 session->proto_alg = conf->protocol;
2970 session->ctxt = DPAA_SEC_IPSEC;
/* SA lifetime limits are not implemented by this PMD; reject them. */
2972 if (ipsec_xform->life.bytes_hard_limit != 0 ||
2973 ipsec_xform->life.bytes_soft_limit != 0 ||
2974 ipsec_xform->life.packets_hard_limit != 0 ||
2975 ipsec_xform->life.packets_soft_limit != 0)
2978 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2979 session->dir = DIR_ENC;
2981 session->dir = DIR_DEC;
/* Resolve the xform chain: cipher-first, auth-first, or AEAD. */
2983 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2984 cipher_xform = &conf->crypto_xform->cipher;
2985 if (conf->crypto_xform->next)
2986 auth_xform = &conf->crypto_xform->next->auth;
2987 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2988 ipsec_xform, session);
2989 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2990 auth_xform = &conf->crypto_xform->auth;
2991 if (conf->crypto_xform->next)
2992 cipher_xform = &conf->crypto_xform->next->cipher;
2993 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2994 ipsec_xform, session);
2995 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2996 aead_xform = &conf->crypto_xform->aead;
2997 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2998 ipsec_xform, session);
3000 DPAA_SEC_ERR("XFORM not specified");
3005 DPAA_SEC_ERR("Failed to process xform");
/* Egress: pre-build the outer tunnel IP header that the SEC engine
 * will prepend (PDBOPTS_ESP_OIHI_PDB_INL below). */
3009 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3010 if (ipsec_xform->tunnel.type ==
3011 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3012 session->ip4_hdr.ip_v = IPVERSION;
3013 session->ip4_hdr.ip_hl = 5;
3014 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
3015 sizeof(session->ip4_hdr));
3016 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3017 session->ip4_hdr.ip_id = 0;
3018 session->ip4_hdr.ip_off = 0;
3019 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3020 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
3021 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3022 IPPROTO_ESP : IPPROTO_AH;
/* Checksum over the header with ip_sum zeroed first. */
3023 session->ip4_hdr.ip_sum = 0;
3024 session->ip4_hdr.ip_src =
3025 ipsec_xform->tunnel.ipv4.src_ip;
3026 session->ip4_hdr.ip_dst =
3027 ipsec_xform->tunnel.ipv4.dst_ip;
3028 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
3029 (void *)&session->ip4_hdr,
3031 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
3032 } else if (ipsec_xform->tunnel.type ==
3033 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3034 session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3035 DPAA_IPv6_DEFAULT_VTC_FLOW |
3036 ((ipsec_xform->tunnel.ipv6.dscp <<
3037 RTE_IPV6_HDR_TC_SHIFT) &
3038 RTE_IPV6_HDR_TC_MASK) |
3039 ((ipsec_xform->tunnel.ipv6.flabel <<
3040 RTE_IPV6_HDR_FL_SHIFT) &
3041 RTE_IPV6_HDR_FL_MASK));
3042 /* Payload length will be updated by HW */
3043 session->ip6_hdr.payload_len = 0;
3044 session->ip6_hdr.hop_limits =
3045 ipsec_xform->tunnel.ipv6.hlimit;
3046 session->ip6_hdr.proto = (ipsec_xform->proto ==
3047 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3048 IPPROTO_ESP : IPPROTO_AH;
3049 memcpy(&session->ip6_hdr.src_addr,
3050 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3051 memcpy(&session->ip6_hdr.dst_addr,
3052 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3053 session->encap_pdb.ip_hdr_len =
3054 sizeof(struct rte_ipv6_hdr);
/* Encap PDB: take outer header from the PDB (inline) and encode
 * the IP version in the next-header field of the options word. */
3057 session->encap_pdb.options =
3058 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3059 PDBOPTS_ESP_OIHI_PDB_INL |
3062 if (ipsec_xform->options.dec_ttl)
3063 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3064 if (ipsec_xform->options.esn)
3065 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
3066 session->encap_pdb.spi = ipsec_xform->spi;
/* Ingress: decap PDB carries the outer header length in its
 * upper half-word, plus ESN/anti-replay window options. */
3068 } else if (ipsec_xform->direction ==
3069 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3070 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3071 session->decap_pdb.options = sizeof(struct ip) << 16;
3073 session->decap_pdb.options =
3074 sizeof(struct rte_ipv6_hdr) << 16;
3075 if (ipsec_xform->options.esn)
3076 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
/* Round the requested replay window up to a power of two and
 * pick the matching anti-replay-size option. */
3077 if (ipsec_xform->replay_win_sz) {
3079 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3088 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
3091 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
3094 session->decap_pdb.options |=
/* Bind one SEC Rx queue per core, under the device lock. */
3100 rte_spinlock_lock(&internals->lock);
3101 for (i = 0; i < MAX_DPAA_CORES; i++) {
3102 session->inq[i] = dpaa_sec_attach_rxq(internals);
3103 if (session->inq[i] == NULL) {
3104 DPAA_SEC_ERR("unable to attach sec queue");
3105 rte_spinlock_unlock(&internals->lock);
3109 rte_spinlock_unlock(&internals->lock);
/* Error path: release key buffers allocated during setup. */
3113 free_session_data(session);
/*
 * dpaa_sec_set_pdcp_session() - configure a PDCP security session.
 *
 * Resolves the cipher/auth xform chain, maps algorithms onto the SEC PDCP
 * cipher/auth type codes, copies keys, validates control-plane SN sizes,
 * records the PDCP parameters (bearer, direction, HFN, SDAP, ...) and
 * attaches one SEC Rx queue per core.
 * NOTE(review): returns/breaks/else branches are elided in this excerpt —
 * confirm against full source.
 */
3118 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
3119 struct rte_security_session_conf *conf,
3122 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3123 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3124 struct rte_crypto_auth_xform *auth_xform = NULL;
3125 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3126 dpaa_sec_session *session = (dpaa_sec_session *)sess;
3127 struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
3131 PMD_INIT_FUNC_TRACE();
3133 memset(session, 0, sizeof(dpaa_sec_session));
3135 /* find xfrm types */
3136 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3137 cipher_xform = &xform->cipher;
3138 if (xform->next != NULL &&
3139 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
3140 auth_xform = &xform->next->auth;
3141 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3142 auth_xform = &xform->auth;
3143 if (xform->next != NULL &&
3144 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
3145 cipher_xform = &xform->next->cipher;
3147 DPAA_SEC_ERR("Invalid crypto type");
3151 session->proto_alg = conf->protocol;
3152 session->ctxt = DPAA_SEC_PDCP;
/* Map cipher algorithm onto the SEC PDCP cipher-type code. */
3155 switch (cipher_xform->algo) {
3156 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3157 session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
3159 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3160 session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
3162 case RTE_CRYPTO_CIPHER_AES_CTR:
3163 session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
3165 case RTE_CRYPTO_CIPHER_NULL:
3166 session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
3169 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
3170 session->cipher_alg);
/* Copy the cipher key; zero-length key may return NULL legitimately. */
3174 session->cipher_key.data = rte_zmalloc(NULL,
3175 cipher_xform->key.length,
3176 RTE_CACHE_LINE_SIZE);
3177 if (session->cipher_key.data == NULL &&
3178 cipher_xform->key.length > 0) {
3179 DPAA_SEC_ERR("No Memory for cipher key");
3182 session->cipher_key.length = cipher_xform->key.length;
3183 memcpy(session->cipher_key.data, cipher_xform->key.data,
3184 cipher_xform->key.length);
3185 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3187 session->cipher_alg = cipher_xform->algo;
/* No cipher xform: NULL cipher, default to encrypt direction. */
3189 session->cipher_key.data = NULL;
3190 session->cipher_key.length = 0;
3191 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3192 session->dir = DIR_ENC;
/* Control plane only supports 5- or 12-bit sequence numbers. */
3195 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3196 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
3197 pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
3199 "PDCP Seq Num size should be 5/12 bits for cmode");
/* Map auth algorithm onto the SEC PDCP auth-type code. */
3206 switch (auth_xform->algo) {
3207 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3208 session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
3210 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3211 session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
3213 case RTE_CRYPTO_AUTH_AES_CMAC:
3214 session->auth_key.alg = PDCP_AUTH_TYPE_AES;
3216 case RTE_CRYPTO_AUTH_NULL:
3217 session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
3220 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
/* Unsupported auth: release the cipher key allocated above. */
3222 rte_free(session->cipher_key.data);
3225 session->auth_key.data = rte_zmalloc(NULL,
3226 auth_xform->key.length,
3227 RTE_CACHE_LINE_SIZE);
3228 if (!session->auth_key.data &&
3229 auth_xform->key.length > 0) {
3230 DPAA_SEC_ERR("No Memory for auth key");
3231 rte_free(session->cipher_key.data);
3234 session->auth_key.length = auth_xform->key.length;
3235 memcpy(session->auth_key.data, auth_xform->key.data,
3236 auth_xform->key.length);
3237 session->auth_alg = auth_xform->algo;
3239 session->auth_key.data = NULL;
3240 session->auth_key.length = 0;
3241 session->auth_alg = 0;
/* Record PDCP protocol parameters for descriptor construction. */
3243 session->pdcp.domain = pdcp_xform->domain;
3244 session->pdcp.bearer = pdcp_xform->bearer;
3245 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3246 session->pdcp.sn_size = pdcp_xform->sn_size;
3247 session->pdcp.hfn = pdcp_xform->hfn;
3248 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3249 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3250 session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
/* HFN-override value is carried in the op's IV area at this offset. */
3252 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
/* Bind one SEC Rx queue per core, under the device lock. */
3254 rte_spinlock_lock(&dev_priv->lock);
3255 for (i = 0; i < MAX_DPAA_CORES; i++) {
3256 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
3257 if (session->inq[i] == NULL) {
3258 DPAA_SEC_ERR("unable to attach sec queue");
3259 rte_spinlock_unlock(&dev_priv->lock);
3264 rte_spinlock_unlock(&dev_priv->lock);
/* Error path: free both keys and wipe the partially-built session. */
3267 rte_free(session->auth_key.data);
3268 rte_free(session->cipher_key.data);
3269 memset(session, 0, sizeof(dpaa_sec_session));
/*
 * dpaa_sec_security_session_create() - rte_security session_create op.
 *
 * Takes a private-data object from the session mempool, dispatches to the
 * protocol-specific setup (IPsec or PDCP; MACsec is not implemented) and
 * returns the object to the mempool on failure.
 */
3274 dpaa_sec_security_session_create(void *dev,
3275 struct rte_security_session_conf *conf,
3276 struct rte_security_session *sess,
3277 struct rte_mempool *mempool)
3279 void *sess_private_data;
3280 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3283 if (rte_mempool_get(mempool, &sess_private_data)) {
3284 DPAA_SEC_ERR("Couldn't get object from session mempool");
3288 switch (conf->protocol) {
3289 case RTE_SECURITY_PROTOCOL_IPSEC:
3290 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3293 case RTE_SECURITY_PROTOCOL_PDCP:
3294 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3297 case RTE_SECURITY_PROTOCOL_MACSEC:
3303 DPAA_SEC_ERR("failed to configure session parameters");
3304 /* Return session to mempool */
3305 rte_mempool_put(mempool, sess_private_data);
3309 set_sec_session_private_data(sess, sess_private_data);
/** Clear the memory of session so it doesn't leave key material behind */
3316 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3317 struct rte_security_session *sess)
3319 PMD_INIT_FUNC_TRACE();
3320 void *sess_priv = get_sec_session_private_data(sess);
3321 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
/* Release queues/keys held by the session, then detach it. */
3324 free_session_memory((struct rte_cryptodev *)dev, s);
3325 set_sec_session_private_data(sess, NULL);
/* dev_configure op: nothing to configure at device level for this PMD. */
3331 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3332 struct rte_cryptodev_config *config __rte_unused)
3334 PMD_INIT_FUNC_TRACE();
/* dev_start op: no per-start work; queues are armed at init time. */
3340 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3342 PMD_INIT_FUNC_TRACE();
/* dev_stop op: no-op for this PMD. */
3347 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3349 PMD_INIT_FUNC_TRACE();
/* dev_close op: validate args only; resources are freed at uninit. */
3353 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3355 PMD_INIT_FUNC_TRACE();
/*
 * dev_infos_get op: report queue-pair/session limits, feature flags,
 * capability table and driver id for this device.
 */
3364 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3365 struct rte_cryptodev_info *info)
3367 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3369 PMD_INIT_FUNC_TRACE();
3371 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3372 info->feature_flags = dev->feature_flags;
3373 info->capabilities = dpaa_sec_capabilities;
3374 info->sym.max_nb_sessions = internals->max_nb_sessions;
3375 info->driver_id = dpaa_cryptodev_driver_id;
/*
 * QMAN DQRR callback for PARALLEL-scheduled event queues: translate a
 * completed SEC frame descriptor back into a crypto op, fill in the
 * rte_event from the queue's stored event template, and hand the op to the
 * event device. The DQRR entry is consumed immediately (no hold-active).
 */
static enum qman_cb_dqrr_result
3380 dpaa_sec_process_parallel_event(void *event,
3381 struct qman_portal *qm __always_unused,
3382 struct qman_fq *outq,
3383 const struct qm_dqrr_entry *dqrr,
3386 const struct qm_fd *fd;
3387 struct dpaa_sec_job *job;
3388 struct dpaa_sec_op_ctx *ctx;
3389 struct rte_event *ev = (struct rte_event *)event;
3393 /* sg is embedded in an op ctx,
3394 * sg[0] is for output
/* Recover the op context from the FD address (job is its member). */
3397 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3399 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3400 ctx->fd_status = fd->status;
/* Protocol-offload sessions: HW changed the packet length, so refresh
 * the mbuf lengths from the output SG entry. */
3401 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3402 struct qm_sg_entry *sg_out;
3405 sg_out = &job->sg[0];
3406 hw_sg_to_cpu(sg_out);
3407 len = sg_out->length;
3408 ctx->op->sym->m_src->pkt_len = len;
3409 ctx->op->sym->m_src->data_len = len;
3411 if (!ctx->fd_status) {
3412 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3414 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3415 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
/* Build the event from the template stored at eventq_attach time. */
3417 ev->event_ptr = (void *)ctx->op;
3419 ev->flow_id = outq->ev.flow_id;
3420 ev->sub_event_type = outq->ev.sub_event_type;
3421 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3422 ev->op = RTE_EVENT_OP_NEW;
3423 ev->sched_type = outq->ev.sched_type;
3424 ev->queue_id = outq->ev.queue_id;
3425 ev->priority = outq->ev.priority;
3426 *bufs = (void *)ctx->op;
3428 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3430 return qman_cb_dqrr_consume;
/*
 * QMAN DQRR callback for ATOMIC-scheduled event queues. Same op recovery
 * and event construction as the parallel variant, but the DQRR entry is
 * DEFERRED: its index is recorded in the per-lcore held-entry mask and in
 * the mbuf seqn so it can be consumed only when the event is released,
 * preserving atomic-flow ordering.
 */
static enum qman_cb_dqrr_result
3434 dpaa_sec_process_atomic_event(void *event,
3435 struct qman_portal *qm __rte_unused,
3436 struct qman_fq *outq,
3437 const struct qm_dqrr_entry *dqrr,
3441 const struct qm_fd *fd;
3442 struct dpaa_sec_job *job;
3443 struct dpaa_sec_op_ctx *ctx;
3444 struct rte_event *ev = (struct rte_event *)event;
3448 /* sg is embedded in an op ctx,
3449 * sg[0] is for output
3452 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3454 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3455 ctx->fd_status = fd->status;
/* Protocol-offload sessions: refresh mbuf lengths from output SG. */
3456 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3457 struct qm_sg_entry *sg_out;
3460 sg_out = &job->sg[0];
3461 hw_sg_to_cpu(sg_out);
3462 len = sg_out->length;
3463 ctx->op->sym->m_src->pkt_len = len;
3464 ctx->op->sym->m_src->data_len = len;
3466 if (!ctx->fd_status) {
3467 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3469 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3470 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3472 ev->event_ptr = (void *)ctx->op;
3473 ev->flow_id = outq->ev.flow_id;
3474 ev->sub_event_type = outq->ev.sub_event_type;
3475 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3476 ev->op = RTE_EVENT_OP_NEW;
3477 ev->sched_type = outq->ev.sched_type;
3478 ev->queue_id = outq->ev.queue_id;
3479 ev->priority = outq->ev.priority;
3481 /* Save active dqrr entries */
3482 index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3483 DPAA_PER_LCORE_DQRR_SIZE++;
3484 DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3485 DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
/* impl_opaque/seqn carry index+1 (0 means "no held entry"). */
3486 ev->impl_opaque = index + 1;
3487 *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3488 *bufs = (void *)ctx->op;
3490 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3492 return qman_cb_dqrr_defer;
/*
 * Attach a queue pair's output FQ to an event-device channel: pick the
 * DQRR callback matching the event schedule type (atomic/parallel;
 * ordered is rejected), re-init the FQ on the given channel and store the
 * event template the callbacks copy from.
 */
3496 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3499 const struct rte_event *event)
3501 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3502 struct qm_mcc_initfq opts = {0};
3506 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3507 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3508 opts.fqd.dest.channel = ch_id;
3510 switch (event->sched_type) {
3511 case RTE_SCHED_TYPE_ATOMIC:
3512 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3513 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3514 * configuration with HOLD_ACTIVE setting
3516 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3517 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3519 case RTE_SCHED_TYPE_ORDERED:
3520 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
/* Default (parallel): allow the FQ to avoid blocking the portal. */
3523 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3524 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3528 ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3529 if (unlikely(ret)) {
3530 DPAA_SEC_ERR("unable to init caam source fq!");
/* Template the callbacks read flow_id/queue_id/priority from. */
3534 memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
/*
 * Detach a queue pair's output FQ from the event device: restore the
 * default poll-mode callbacks, retire and take the FQ out of service,
 * then re-initialise it unscheduled.
 */
3540 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3543 struct qm_mcc_initfq opts = {0};
3545 struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3547 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3548 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3549 qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3550 qp->outq.cb.ern = ern_sec_fq_handler;
3551 qman_retire_fq(&qp->outq, NULL);
3552 qman_oos_fq(&qp->outq);
3553 ret = qman_init_fq(&qp->outq, 0, &opts);
3555 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3556 qp->outq.cb.dqrr = NULL;
/* Cryptodev ops table wiring this PMD's callbacks into rte_cryptodev. */
static struct rte_cryptodev_ops crypto_ops = {
3562 .dev_configure = dpaa_sec_dev_configure,
3563 .dev_start = dpaa_sec_dev_start,
3564 .dev_stop = dpaa_sec_dev_stop,
3565 .dev_close = dpaa_sec_dev_close,
3566 .dev_infos_get = dpaa_sec_dev_infos_get,
3567 .queue_pair_setup = dpaa_sec_queue_pair_setup,
3568 .queue_pair_release = dpaa_sec_queue_pair_release,
3569 .sym_session_get_size = dpaa_sec_sym_session_get_size,
3570 .sym_session_configure = dpaa_sec_sym_session_configure,
3571 .sym_session_clear = dpaa_sec_sym_session_clear,
3572 /* Raw data-path API related operations */
3573 .sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
3574 .sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
3577 #ifdef RTE_LIB_SECURITY
/* rte_security capabilities_get op: return the static capability table. */
static const struct rte_security_capability *
3579 dpaa_sec_capabilities_get(void *device __rte_unused)
3581 return dpaa_sec_security_cap;
/* rte_security ops table; update/stats/metadata are not implemented. */
static const struct rte_security_ops dpaa_sec_security_ops = {
3585 .session_create = dpaa_sec_security_session_create,
3586 .session_update = NULL,
3587 .session_stats_get = NULL,
3588 .session_destroy = dpaa_sec_security_session_destroy,
3589 .set_pkt_metadata = NULL,
3590 .capabilities_get = dpaa_sec_capabilities_get
/*
 * dpaa_sec_uninit() - release per-device state: the security context and
 * the private data allocated at probe time.
 */
3594 dpaa_sec_uninit(struct rte_cryptodev *dev)
3596 struct dpaa_sec_dev_private *internals;
3601 internals = dev->data->dev_private;
3602 rte_free(dev->security_ctx);
3604 rte_free(internals);
3606 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3607 dev->data->name, rte_socket_id());
/*
 * kvargs handler for DRIVER_DUMP_MODE: parse the value into the global
 * dpaa_sec_dp_dump level, clamping out-of-range values to FULL dump.
 */
check_devargs_handler(__rte_unused const char *key, const char *value,
3614 __rte_unused void *opaque)
3616 dpaa_sec_dp_dump = atoi(value);
3617 if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
3618 DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
3619 "supported, changing to FULL error prints\n");
3620 dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
/*
 * Parse the device arguments for @key and, if present, run
 * check_devargs_handler() on its value. The kvlist is freed on all paths.
 */
3627 dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
3629 struct rte_kvargs *kvlist;
3634 kvlist = rte_kvargs_parse(devargs->args, NULL);
3638 if (!rte_kvargs_count(kvlist, key)) {
3639 rte_kvargs_free(kvlist);
3643 rte_kvargs_process(kvlist, key,
3644 check_devargs_handler, NULL);
3645 rte_kvargs_free(kvlist);
/*
 * dpaa_sec_dev_init() - one-time device initialisation.
 *
 * Maps the SEC register block via /dev/mem to enable the QI interface if
 * needed, wires the burst functions, ops table and feature flags, then (in
 * the primary process only) creates the security context, the Tx FQ for
 * every queue pair and the pool of session Rx FQs, and finally reads the
 * drv_dump_mode devarg.
 * NOTE(review): close(map_fd), returns and goto targets are elided in this
 * excerpt — confirm against full source.
 */
3649 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3651 struct dpaa_sec_dev_private *internals;
3652 #ifdef RTE_LIB_SECURITY
3653 struct rte_security_ctx *security_instance;
3655 struct dpaa_sec_qp *qp;
3661 PMD_INIT_FUNC_TRACE();
3663 internals = cryptodev->data->dev_private;
/* Map SEC registers through /dev/mem to check/enable the QI block. */
3664 map_fd = open("/dev/mem", O_RDWR);
3665 if (unlikely(map_fd < 0)) {
3666 DPAA_SEC_ERR("Unable to open (/dev/mem)");
3669 internals->sec_hw = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
3670 MAP_SHARED, map_fd, SEC_BASE_ADDR);
3671 if (internals->sec_hw == MAP_FAILED) {
3672 DPAA_SEC_ERR("Memory map failed");
3676 cmd_map = (uint8_t *)internals->sec_hw +
3677 (BLOCK_OFFSET * QI_BLOCK_NUMBER) + CMD_REG;
3678 if (!(be32_to_cpu(rte_read32(cmd_map)) & QICTL_DQEN))
3679 /* enable QI interface */
3680 rte_write32(cpu_to_be32(QICTL_DQEN), cmd_map);
3682 ret = munmap(internals->sec_hw, MAP_SIZE);
3684 DPAA_SEC_WARN("munmap failed\n");
3687 cryptodev->driver_id = dpaa_cryptodev_driver_id;
3688 cryptodev->dev_ops = &crypto_ops;
3690 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3691 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3692 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3693 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3694 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3695 RTE_CRYPTODEV_FF_SECURITY |
3696 RTE_CRYPTODEV_FF_SYM_RAW_DP |
3697 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3698 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3699 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3700 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3701 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3703 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3704 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3707 * For secondary processes, we don't initialise any further as primary
3708 * has already done this work. Only check we don't need a different
3711 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3712 DPAA_SEC_WARN("Device already init by primary process");
3715 #ifdef RTE_LIB_SECURITY
3716 /* Initialize security_ctx only for primary process*/
3717 security_instance = rte_malloc("rte_security_instances_ops",
3718 sizeof(struct rte_security_ctx), 0);
3719 if (security_instance == NULL)
3721 security_instance->device = (void *)cryptodev;
3722 security_instance->ops = &dpaa_sec_security_ops;
3723 security_instance->sess_cnt = 0;
3724 cryptodev->security_ctx = security_instance;
3726 rte_spinlock_init(&internals->lock);
/* One Tx FQ (towards SEC) per queue pair. */
3727 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3728 /* init qman fq for queue pair */
3729 qp = &internals->qps[i];
3730 ret = dpaa_sec_init_tx(&qp->outq);
3732 DPAA_SEC_ERR("config tx of queue pair %d", i);
/* Pool of Rx FQs later handed out to sessions by attach_rxq(). */
3737 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3738 QMAN_FQ_FLAG_TO_DCPORTAL;
3739 for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3740 /* create rx qman fq for sessions*/
3741 ret = qman_create_fq(0, flags, &internals->inq[i]);
3742 if (unlikely(ret != 0)) {
3743 DPAA_SEC_ERR("sec qman_create_fq failed");
3748 dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
3750 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
/* Error path: undo the security context allocation. */
3754 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3756 rte_free(cryptodev->security_ctx);
/*
 * Bus probe: allocate the cryptodev (private data in the primary process
 * only), detect the SEC era from the device tree if not yet set, ensure
 * the calling thread has a DPAA portal, then run dpaa_sec_dev_init().
 */
3761 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3762 struct rte_dpaa_device *dpaa_dev)
3764 struct rte_cryptodev *cryptodev;
3765 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3769 snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3771 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3772 if (cryptodev == NULL)
3775 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3776 cryptodev->data->dev_private = rte_zmalloc_socket(
3777 "cryptodev private structure",
3778 sizeof(struct dpaa_sec_dev_private),
3779 RTE_CACHE_LINE_SIZE,
3782 if (cryptodev->data->dev_private == NULL)
3783 rte_panic("Cannot allocate memzone for private "
3787 dpaa_dev->crypto_dev = cryptodev;
3788 cryptodev->device = &dpaa_dev->device;
3790 /* init user callbacks */
3791 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3793 /* if sec device version is not configured */
3794 if (!rta_get_sec_era()) {
3795 const struct device_node *caam_node;
3797 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3798 const uint32_t *prop = of_get_property(caam_node,
3803 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
/* The probing thread needs a QMAN portal before touching FQs. */
3809 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3810 retval = rte_dpaa_portal_init((void *)1);
3812 DPAA_SEC_ERR("Unable to initialize portal");
3817 /* Invoke PMD device initialization function */
3818 retval = dpaa_sec_dev_init(cryptodev);
3820 rte_cryptodev_pmd_probing_finish(cryptodev);
3826 /* In case of error, cleanup is done */
3827 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3828 rte_free(cryptodev->data->dev_private);
3830 rte_cryptodev_pmd_release_device(cryptodev);
/* Bus remove: uninitialise the device and destroy the cryptodev. */
3836 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3838 struct rte_cryptodev *cryptodev;
3841 cryptodev = dpaa_dev->crypto_dev;
3842 if (cryptodev == NULL)
3845 ret = dpaa_sec_uninit(cryptodev);
3849 return rte_cryptodev_pmd_destroy(cryptodev);
/* DPAA bus driver descriptor plus PMD/crypto-driver/devarg/log
 * registration macros for this SEC PMD. */
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3853 .drv_type = FSL_DPAA_CRYPTO,
3855 .name = "DPAA SEC PMD"
3857 .probe = cryptodev_dpaa_sec_probe,
3858 .remove = cryptodev_dpaa_sec_remove,
3861 static struct cryptodev_driver dpaa_sec_crypto_drv;
3863 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3864 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3865 dpaa_cryptodev_driver_id);
3866 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
3867 DRIVER_DUMP_MODE "=<int>");
3868 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);