/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2019 NXP
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
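/* Update the crypto op status from the SEC frame descriptor status. */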
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
    if (!ctx->fd_status) {
        ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
        ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
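/* Allocate a per-op context (job descriptor plus SG table) from the
 * session queue pair's ctx pool.
 */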
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
    struct dpaa_sec_op_ctx *ctx;

    retval = rte_mempool_get(
            ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
        DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
     * Clear SG memory. There are 16 SG entries of 16 bytes each.
     * One call to dcbz_64() clears 64 bytes, so it is called 4 times
     * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
     * each packet, and memset is costlier than dcbz_64().
    for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
        dcbz_64(&ctx->job.sg[i]);

    ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
    ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
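/* Virtual-to-IOVA translation: look up the memseg that backs the address
 * and record the mapping in the dpaax IOVA table for fast reuse.
 */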
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
    const struct rte_memseg *ms;

    ms = rte_mem_virt2memseg(vaddr, NULL);
        dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
    return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
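/* IOVA-to-virtual translation: try the dpaax IOVA table first and fall
 * back to the slower rte_mem_iova2virt() lookup.
 */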
dpaa_mem_ptov(rte_iova_t paddr)
    va = (void *)dpaax_iova_table_get_va(paddr);

    return rte_mem_iova2virt(paddr);

ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
        const struct qm_mr_entry *msg)
    DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
            fq->fqid, msg->ern.rc, msg->ern.seqnum);
/* Initialize the queue with dest chan as the CAAM chan so that
 * all the packets in this queue are dispatched to CAAM.
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
    struct qm_mcc_initfq fq_opts;

    /* Clear FQ options */
    memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

    flags = QMAN_INITFQ_FLAG_SCHED;
    fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
            QM_INITFQ_WE_CONTEXTB;

    qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
    fq_opts.fqd.context_b = fqid_out;
    fq_opts.fqd.dest.channel = qm_channel_caam;
    fq_opts.fqd.dest.wq = 0;

    fq_in->cb.ern = ern_sec_fq_handler;

    DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

    ret = qman_init_fq(fq_in, flags, &fq_opts);
    if (unlikely(ret != 0))
        DPAA_SEC_ERR("qman_init_fq failed %d", ret);
/* Something is put into in_fq and CAAM puts the crypto result into out_fq. */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
        struct qman_fq *fq __always_unused,
        const struct qm_dqrr_entry *dqrr)
    const struct qm_fd *fd;
    struct dpaa_sec_job *job;
    struct dpaa_sec_op_ctx *ctx;

    if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
        return qman_cb_dqrr_defer;

    if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
        return qman_cb_dqrr_consume;

    /* sg is embedded in an op ctx,
     * sg[0] is for output
    job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

    ctx = container_of(job, struct dpaa_sec_op_ctx, job);
    ctx->fd_status = fd->status;
    if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
        struct qm_sg_entry *sg_out;

        struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                ctx->op->sym->m_src : ctx->op->sym->m_dst;

        sg_out = &job->sg[0];
        hw_sg_to_cpu(sg_out);
        len = sg_out->length;

        while (mbuf->next != NULL) {
            len -= mbuf->data_len;
        mbuf->data_len = len;

    dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
    dpaa_sec_op_ending(ctx);

    return qman_cb_dqrr_consume;

/* caam result is put into this queue */
dpaa_sec_init_tx(struct qman_fq *fq)
    struct qm_mcc_initfq opts;

    flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
            QMAN_FQ_FLAG_DYNAMIC_FQID;

    ret = qman_create_fq(0, flags, fq);
        DPAA_SEC_ERR("qman_create_fq failed");

    memset(&opts, 0, sizeof(opts));
    opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
            QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

    /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

    fq->cb.dqrr = dqrr_out_fq_cb_rx;
    fq->cb.ern = ern_sec_fq_handler;

    ret = qman_init_fq(fq, 0, &opts);
        DPAA_SEC_ERR("unable to init caam source fq!");

static inline int is_cipher_only(dpaa_sec_session *ses)
    return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));

static inline int is_auth_only(dpaa_sec_session *ses)
    return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));

static inline int is_aead(dpaa_sec_session *ses)
    return ((ses->cipher_alg == 0) &&
        (ses->auth_alg == 0) &&
        (ses->aead_alg != 0));

static inline int is_auth_cipher(dpaa_sec_session *ses)
    return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
        (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
        (ses->proto_alg != RTE_SECURITY_PROTOCOL_PDCP) &&
        (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));

static inline int is_proto_ipsec(dpaa_sec_session *ses)
    return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);

static inline int is_proto_pdcp(dpaa_sec_session *ses)
    return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);

static inline int is_encode(dpaa_sec_session *ses)
    return ses->dir == DIR_ENC;

static inline int is_decode(dpaa_sec_session *ses)
    return ses->dir == DIR_DEC;
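/* Map the session auth algorithm onto CAAM descriptor constants; IPsec
 * protocol sessions use the OP_PCL_IPSEC_* variants.
 */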
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
    switch (ses->auth_alg) {
    case RTE_CRYPTO_AUTH_NULL:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_NULL : 0;
        ses->digest_length = 0;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
        alginfo_a->algmode = OP_ALG_AAI_HMAC;
        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
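/* Map the session cipher algorithm onto CAAM descriptor constants; IPsec
 * protocol sessions use the OP_PCL_IPSEC_* variants.
 */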
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
    switch (ses->cipher_alg) {
    case RTE_CRYPTO_CIPHER_NULL:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_NULL : 0;
    case RTE_CRYPTO_CIPHER_AES_CBC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
        alginfo_c->algmode = OP_ALG_AAI_CBC;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
        alginfo_c->algmode = OP_ALG_AAI_CBC;
    case RTE_CRYPTO_CIPHER_AES_CTR:
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
        alginfo_c->algmode = OP_ALG_AAI_CTR;
        DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
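/* Map the session AEAD algorithm onto CAAM descriptor constants. */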
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
    switch (ses->aead_alg) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        alginfo->algtype = OP_ALG_ALGSEL_AES;
        alginfo->algmode = OP_ALG_AAI_GCM;
        DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
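/* prepare pdcp proto command block of the session */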
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
    struct alginfo authdata = {0}, cipherdata = {0};
    struct sec_cdb *cdb = &ses->cdb;
    struct alginfo *p_authdata = NULL;
    int32_t shared_desc_len = 0;

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

    switch (ses->cipher_alg) {
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
        cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
        cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
    case RTE_CRYPTO_CIPHER_NULL:
        cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
        DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",

    cipherdata.key = (size_t)ses->cipher_key.data;
    cipherdata.keylen = ses->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    cdb->sh_desc[0] = cipherdata.keylen;

    switch (ses->auth_alg) {
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
        authdata.algtype = PDCP_AUTH_TYPE_SNOW;
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        authdata.algtype = PDCP_AUTH_TYPE_ZUC;
    case RTE_CRYPTO_AUTH_AES_CMAC:
        authdata.algtype = PDCP_AUTH_TYPE_AES;
    case RTE_CRYPTO_AUTH_NULL:
        authdata.algtype = PDCP_AUTH_TYPE_NULL;
        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",

    authdata.key = (size_t)ses->auth_key.data;
    authdata.keylen = ses->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    p_authdata = &authdata;

    cdb->sh_desc[1] = authdata.keylen;

    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
            (unsigned int *)cdb->sh_desc,
            &cdb->sh_desc[2], 2);
        DPAA_SEC_ERR("Crypto: Incorrect key lengths");

    if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
            (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
        cipherdata.key_type = RTA_DATA_PTR;
    if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
            (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
        authdata.key_type = RTA_DATA_PTR;

    if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, &authdata,
        else if (ses->dir == DIR_DEC)
            shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, &authdata,
        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, p_authdata, 0);
        else if (ses->dir == DIR_DEC)
            shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                    cdb->sh_desc, 1, swap,
                    ses->pdcp.hfn_threshold,
                    &cipherdata, p_authdata, 0);

    return shared_desc_len;
/* prepare ipsec proto command block of the session */
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
    struct alginfo cipherdata = {0}, authdata = {0};
    struct sec_cdb *cdb = &ses->cdb;
    int32_t shared_desc_len = 0;

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

    caam_cipher_alg(ses, &cipherdata);
    if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
        DPAA_SEC_ERR("not supported cipher alg");

    cipherdata.key = (size_t)ses->cipher_key.data;
    cipherdata.keylen = ses->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    caam_auth_alg(ses, &authdata);
    if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
        DPAA_SEC_ERR("not supported auth alg");

    authdata.key = (size_t)ses->auth_key.data;
    authdata.keylen = ses->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    cdb->sh_desc[0] = cipherdata.keylen;
    cdb->sh_desc[1] = authdata.keylen;
    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
            (unsigned int *)cdb->sh_desc,
            &cdb->sh_desc[2], 2);

        DPAA_SEC_ERR("Crypto: Incorrect key lengths");

    if (cdb->sh_desc[2] & 1)
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.key = (size_t)dpaa_mem_vtop(
                (void *)(size_t)cipherdata.key);
        cipherdata.key_type = RTA_DATA_PTR;
    if (cdb->sh_desc[2] & (1<<1))
        authdata.key_type = RTA_DATA_IMM;
        authdata.key = (size_t)dpaa_mem_vtop(
                (void *)(size_t)authdata.key);
        authdata.key_type = RTA_DATA_PTR;

    if (ses->dir == DIR_ENC) {
        shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                true, swap, SHR_SERIAL,
                (uint8_t *)&ses->ip4_hdr,
                &cipherdata, &authdata);
    } else if (ses->dir == DIR_DEC) {
        shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                true, swap, SHR_SERIAL,
                &cipherdata, &authdata);

    return shared_desc_len;
/* prepare command block of the session */
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
    struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
    int32_t shared_desc_len = 0;
    struct sec_cdb *cdb = &ses->cdb;

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN

    memset(cdb, 0, sizeof(struct sec_cdb));

    if (is_proto_ipsec(ses)) {
        shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
    } else if (is_proto_pdcp(ses)) {
        shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
    } else if (is_cipher_only(ses)) {
        caam_cipher_alg(ses, &alginfo_c);
        if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported cipher alg");

        alginfo_c.key = (size_t)ses->cipher_key.data;
        alginfo_c.keylen = ses->cipher_key.length;
        alginfo_c.key_enc_flags = 0;
        alginfo_c.key_type = RTA_DATA_IMM;

        shared_desc_len = cnstr_shdsc_blkcipher(
                swap, SHR_NEVER, &alginfo_c,
    } else if (is_auth_only(ses)) {
        caam_auth_alg(ses, &alginfo_a);
        if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported auth alg");

        alginfo_a.key = (size_t)ses->auth_key.data;
        alginfo_a.keylen = ses->auth_key.length;
        alginfo_a.key_enc_flags = 0;
        alginfo_a.key_type = RTA_DATA_IMM;

        shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                swap, SHR_NEVER, &alginfo_a,
    } else if (is_aead(ses)) {
        caam_aead_alg(ses, &alginfo);
        if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported aead alg");
        alginfo.key = (size_t)ses->aead_key.data;
        alginfo.keylen = ses->aead_key.length;
        alginfo.key_enc_flags = 0;
        alginfo.key_type = RTA_DATA_IMM;

        if (ses->dir == DIR_ENC)
            shared_desc_len = cnstr_shdsc_gcm_encap(
                    cdb->sh_desc, true, swap, SHR_NEVER,
            shared_desc_len = cnstr_shdsc_gcm_decap(
                    cdb->sh_desc, true, swap, SHR_NEVER,
        caam_cipher_alg(ses, &alginfo_c);
        if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported cipher alg");

        alginfo_c.key = (size_t)ses->cipher_key.data;
        alginfo_c.keylen = ses->cipher_key.length;
        alginfo_c.key_enc_flags = 0;
        alginfo_c.key_type = RTA_DATA_IMM;

        caam_auth_alg(ses, &alginfo_a);
        if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
            DPAA_SEC_ERR("not supported auth alg");

        alginfo_a.key = (size_t)ses->auth_key.data;
        alginfo_a.keylen = ses->auth_key.length;
        alginfo_a.key_enc_flags = 0;
        alginfo_a.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = alginfo_c.keylen;
        cdb->sh_desc[1] = alginfo_a.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                (unsigned int *)cdb->sh_desc,
                &cdb->sh_desc[2], 2);

            DPAA_SEC_ERR("Crypto: Incorrect key lengths");

        if (cdb->sh_desc[2] & 1)
            alginfo_c.key_type = RTA_DATA_IMM;
            alginfo_c.key = (size_t)dpaa_mem_vtop(
                    (void *)(size_t)alginfo_c.key);
            alginfo_c.key_type = RTA_DATA_PTR;
        if (cdb->sh_desc[2] & (1<<1))
            alginfo_a.key_type = RTA_DATA_IMM;
            alginfo_a.key = (size_t)dpaa_mem_vtop(
                    (void *)(size_t)alginfo_a.key);
            alginfo_a.key_type = RTA_DATA_PTR;

        /* Auth_only_len is set as 0 here and it will be
         * overwritten in fd for each packet.
        shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                ses->digest_length, ses->dir);

    if (shared_desc_len < 0) {
        DPAA_SEC_ERR("error in preparing command block");
        return shared_desc_len;

    cdb->sh_hdr.hi.field.idlen = shared_desc_len;
    cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
    cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

/* qp is lockless, should be accessed by only one thread */
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
    unsigned int pkts = 0;
    int num_rx_bufs, ret;
    struct qm_dqrr_entry *dq;
    uint32_t vdqcr_flags = 0;
     * For requests of fewer than four buffers, we provide exactly the
     * requested number of buffers by setting the QM_VDQCR_EXACT flag.
     * Otherwise we do not set QM_VDQCR_EXACT; in that case the dequeue
     * may return up to two more buffers than requested, so we ask for
     * two less.
        vdqcr_flags = QM_VDQCR_EXACT;
        num_rx_bufs = nb_ops;
        num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
    ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);

        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;
        struct rte_crypto_op *op;

        dq = qman_dequeue(fq);

        /* sg is embedded in an op ctx,
         * sg[0] is for output
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
            struct qm_sg_entry *sg_out;

            struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                    op->sym->m_src : op->sym->m_dst;

            sg_out = &job->sg[0];
            hw_sg_to_cpu(sg_out);
            len = sg_out->length;

            while (mbuf->next != NULL) {
                len -= mbuf->data_len;
            mbuf->data_len = len;

        if (!ctx->fd_status) {
            op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
            DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
            op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        /* Report op status to sym->op and then free the ctx memory. */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);

        qman_dqrr_consume(fq, dq);
    } while (fq->flags & QMAN_FQ_STATE_VDQCR);
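/* Build an auth-only job over a scatter-gather mbuf: sg[0] carries the
 * digest output, sg[1] is an extension SG pointing at the per-segment
 * input entries (plus the saved digest when verifying).
 */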
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct rte_mbuf *mbuf = sym->m_src;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    phys_addr_t start_addr;
    uint8_t *old_digest, extra_segs;

    if (mbuf->nb_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",

    ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);

    old_digest = ctx->digest;

    qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
    out_sg->length = ses->digest_length;
    cpu_to_hw_sg(out_sg);

    /* need to extend the input to a compound frame */
    in_sg->extension = 1;
    in_sg->length = sym->auth.data.length;
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;

    if (is_decode(ses)) {
        /* Digest verification case */
        rte_memcpy(old_digest, sym->auth.digest.data,
        start_addr = dpaa_mem_vtop(old_digest);
        qm_sg_entry_set64(sg, start_addr);
        sg->length = ses->digest_length;
        in_sg->length += ses->digest_length;
        /* Digest calculation case */
        sg->length -= ses->digest_length;

 * |<----data_len------->|
 * |ip_header|ah_header|icv|payload|

static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct rte_mbuf *mbuf = sym->m_src;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t start_addr;

    ctx = dpaa_sec_alloc_ctx(ses, 4);

    old_digest = ctx->digest;

    start_addr = rte_pktmbuf_iova(mbuf);
    qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
    sg->length = ses->digest_length;

    if (is_decode(ses)) {
        /* need to extend the input to a compound frame */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        sg->length = sym->auth.data.length + ses->digest_length;

        /* hash result or digest, save digest first */
        rte_memcpy(old_digest, sym->auth.digest.data,
        qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        /* let the hardware verify the digest */
        start_addr = dpaa_mem_vtop(old_digest);
        qm_sg_entry_set64(sg, start_addr);
        sg->length = ses->digest_length;
        qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
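/* Build a cipher-only job over a scatter-gather mbuf: sg[0] is an
 * extension over the output segments, sg[1] an extension over the IV
 * followed by the input segments.
 */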
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        req_segs = mbuf->nb_segs * 2 + 3;

    if (mbuf->nb_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",

    ctx = dpaa_sec_alloc_ctx(ses, req_segs);

    out_sg = &cf->sg[0];
    out_sg->extension = 1;
    out_sg->length = sym->cipher.data.length;
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
    cpu_to_hw_sg(out_sg);

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->cipher.data.offset;
    sg->offset = sym->cipher.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;

    in_sg->extension = 1;
    in_sg->length = sym->cipher.data.length + ses->iv.length;

    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->cipher.data.offset;
    sg->offset = sym->cipher.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
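/* Build a cipher-only job for a contiguous mbuf: a simple compound
 * frame with sg[0] as output and sg[1] extending over IV plus input.
 */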
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t src_start_addr, dst_start_addr;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    ctx = dpaa_sec_alloc_ctx(ses, 4);

    src_start_addr = rte_pktmbuf_iova(sym->m_src);
        dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        dst_start_addr = src_start_addr;

    qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length + ses->iv.length;

    /* need to extend the input to a compound frame */
    sg->length = sym->cipher.data.length + ses->iv.length;
    qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));

    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;

    qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length;
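/* Build an AEAD (AES-GCM) job over a scatter-gather mbuf: the output
 * covers AAD + data (+ digest on encrypt), the input covers IV + AAD +
 * data (+ the saved digest on decrypt).
 */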
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        req_segs = mbuf->nb_segs * 2 + 4;

    if (ses->auth_only_len)

    if (mbuf->nb_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",

    ctx = dpaa_sec_alloc_ctx(ses, req_segs);

    rte_prefetch0(cf->sg);

    out_sg = &cf->sg[0];
    out_sg->extension = 1;
        out_sg->length = sym->aead.data.length + ses->auth_only_len
                + ses->digest_length;
        out_sg->length = sym->aead.data.length + ses->auth_only_len;

    /* output sg entries */
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(out_sg);

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->aead.data.offset +
    sg->offset = sym->aead.data.offset - ses->auth_only_len;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        sg->length -= ses->digest_length;

    if (is_encode(ses)) {
        /* set auth output */
        qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
        sg->length = ses->digest_length;

    in_sg->extension = 1;
        in_sg->length = ses->iv.length + sym->aead.data.length
                + ses->auth_only_len;
        in_sg->length = ses->iv.length + sym->aead.data.length
                + ses->auth_only_len + ses->digest_length;

    /* input sg entries */
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;

    /* 2nd seg auth only */
    if (ses->auth_only_len) {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
        sg->length = ses->auth_only_len;

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->aead.data.offset;
    sg->offset = sym->aead.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;

    if (is_decode(ses)) {
        memcpy(ctx->digest, sym->aead.digest.data,
                ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
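/* Build an AEAD (AES-GCM) job for a contiguous mbuf using a single
 * compound frame: sg[1] extends over IV, AAD and input data, sg[0]
 * over the output (and digest on encrypt).
 */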
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    uint32_t length = 0;
    rte_iova_t src_start_addr, dst_start_addr;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        dst_start_addr = src_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses, 7);

    rte_prefetch0(cf->sg);

    qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
    if (is_encode(ses)) {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;

        if (ses->auth_only_len) {
            qm_sg_entry_set64(sg,
                    dpaa_mem_vtop(sym->aead.aad.data));
            sg->length = ses->auth_only_len;
            length += sg->length;

        qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length += sg->length;

        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;

        if (ses->auth_only_len) {
            qm_sg_entry_set64(sg,
                    dpaa_mem_vtop(sym->aead.aad.data));
            sg->length = ses->auth_only_len;
            length += sg->length;

        qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length += sg->length;

        memcpy(ctx->digest, sym->aead.digest.data,
                ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
        length += sg->length;

    /* input compound frame */
    cf->sg[1].length = length;
    cf->sg[1].extension = 1;
    cf->sg[1].final = 1;
    cpu_to_hw_sg(&cf->sg[1]);

    qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
    qm_sg_entry_set64(sg,
        dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
    sg->length = sym->aead.data.length + ses->auth_only_len;
    length = sg->length;
    if (is_encode(ses)) {
        /* set auth output */
        qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
        sg->length = ses->digest_length;
        length += sg->length;

    /* output compound frame */
    cf->sg[0].length = length;
    cf->sg[0].extension = 1;
    cpu_to_hw_sg(&cf->sg[0]);
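/* Build a cipher+auth job over a scatter-gather mbuf: the output covers
 * the authenticated range (+ digest on encrypt), the input covers IV
 * plus the auth range (+ the saved digest on decrypt).
 */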
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        req_segs = mbuf->nb_segs * 2 + 4;

    if (mbuf->nb_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",

    ctx = dpaa_sec_alloc_ctx(ses, req_segs);

    rte_prefetch0(cf->sg);

    out_sg = &cf->sg[0];
    out_sg->extension = 1;
        out_sg->length = sym->auth.data.length + ses->digest_length;
        out_sg->length = sym->auth.data.length;

    /* output sg entries */
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(out_sg);

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        sg->length -= ses->digest_length;

    if (is_encode(ses)) {
        /* set auth output */
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;

    in_sg->extension = 1;
        in_sg->length = ses->iv.length + sym->auth.data.length;
        in_sg->length = ses->iv.length + sym->auth.data.length
                + ses->digest_length;

    /* input sg entries */
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
    cpu_to_hw_sg(in_sg);

    qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
    sg->length = ses->iv.length;

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len - sym->auth.data.offset;
    sg->offset = sym->auth.data.offset;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;

    sg->length -= ses->digest_length;
    if (is_decode(ses)) {
        memcpy(ctx->digest, sym->auth.digest.data,
                ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
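/* Build a cipher+auth job for a contiguous mbuf using a single compound
 * frame; on decrypt the received digest is copied aside so the hardware
 * can verify it.
 */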
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    rte_iova_t src_start_addr, dst_start_addr;
    uint32_t length = 0;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        dst_start_addr = src_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses, 7);

    rte_prefetch0(cf->sg);

    qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
    if (is_encode(ses)) {
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;

        qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        length += sg->length;

        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        length += sg->length;

        qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
        sg->length = sym->auth.data.length;
        length += sg->length;

        memcpy(ctx->digest, sym->auth.digest.data,
                ses->digest_length);
        qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
        sg->length = ses->digest_length;
        length += sg->length;

    /* input compound frame */
    cf->sg[1].length = length;
    cf->sg[1].extension = 1;
    cf->sg[1].final = 1;
    cpu_to_hw_sg(&cf->sg[1]);

    qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
    qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
    sg->length = sym->cipher.data.length;
    length = sg->length;
    if (is_encode(ses)) {
        /* set auth output */
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        length += sg->length;

    /* output compound frame */
    cf->sg[0].length = length;
    cf->sg[0].extension = 1;
    cpu_to_hw_sg(&cf->sg[0]);
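/* Build a protocol-offload (IPsec/PDCP) job for a contiguous mbuf:
 * sg[1] covers the whole input packet, sg[0] the output buffer; the
 * hardware performs the full protocol transform.
 */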
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg;
    phys_addr_t src_start_addr, dst_start_addr;

    ctx = dpaa_sec_alloc_ctx(ses, 2);

    src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
        dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
        dst_start_addr = src_start_addr;

    qm_sg_entry_set64(sg, src_start_addr);
    sg->length = sym->m_src->pkt_len;

    sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

    qm_sg_entry_set64(sg, dst_start_addr);
    sg->length = sym->m_src->buf_len - sym->m_src->data_off;
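/* Build a protocol-offload (IPsec/PDCP) job over a scatter-gather mbuf:
 * both input and output are extension SG tables over the segment chain.
 */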
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
    struct rte_crypto_sym_op *sym = op->sym;
    struct dpaa_sec_job *cf;
    struct dpaa_sec_op_ctx *ctx;
    struct qm_sg_entry *sg, *out_sg, *in_sg;
    struct rte_mbuf *mbuf;
    uint32_t in_len = 0, out_len = 0;

    req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
    if (mbuf->nb_segs > MAX_SG_ENTRIES) {
        DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",

    ctx = dpaa_sec_alloc_ctx(ses, req_segs);

    out_sg = &cf->sg[0];
    out_sg->extension = 1;
    qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));

    /* Successive segs */
    while (mbuf->next) {
        sg->length = mbuf->data_len;
        out_len += sg->length;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->buf_len - mbuf->data_off;
    out_len += sg->length;

    out_sg->length = out_len;
    cpu_to_hw_sg(out_sg);

    in_sg->extension = 1;
    in_len = mbuf->data_len;

    qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));

    qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
    sg->length = mbuf->data_len;

    /* Successive segs */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len;
        in_len += sg->length;

    in_sg->length = in_len;
    cpu_to_hw_sg(in_sg);

    sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
    /* Function to transmit the frames to the given device and queue pair */
    struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
    uint16_t num_tx = 0;
    struct qm_fd fds[DPAA_SEC_BURST], *fd;
    uint32_t frames_to_send;
    struct rte_crypto_op *op;
    struct dpaa_sec_job *cf;
    dpaa_sec_session *ses;
    uint32_t auth_only_len, index, flags[DPAA_SEC_BURST] = {0};
    struct qman_fq *inq[DPAA_SEC_BURST];

        frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
                DPAA_SEC_BURST : nb_ops;
        for (loop = 0; loop < frames_to_send; loop++) {
            if (op->sym->m_src->seqn != 0) {
                index = op->sym->m_src->seqn - 1;
                if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
                    /* QM_EQCR_DCA_IDXMASK = 0x0f */
                    flags[loop] = ((index & 0x0f) << 8);
                    flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
                    DPAA_PER_LCORE_DQRR_SIZE--;
                    DPAA_PER_LCORE_DQRR_HELD &=

            switch (op->sess_type) {
            case RTE_CRYPTO_OP_WITH_SESSION:
                ses = (dpaa_sec_session *)
                    get_sym_session_private_data(
                        cryptodev_driver_id);
            case RTE_CRYPTO_OP_SECURITY_SESSION:
                ses = (dpaa_sec_session *)
                    get_sec_session_private_data(
                        op->sym->sec_session);
                    "sessionless crypto op not supported");
                frames_to_send = loop;

            if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
                if (dpaa_sec_attach_sess_q(qp, ses)) {
                    frames_to_send = loop;
            } else if (unlikely(ses->qp[rte_lcore_id() %
                        MAX_DPAA_CORES] != qp)) {
                DPAA_SEC_DP_ERR("Old:sess->qp = %p"
                        ses->qp[rte_lcore_id() %
                        MAX_DPAA_CORES], qp);
                frames_to_send = loop;

            auth_only_len = op->sym->auth.data.length -
                    op->sym->cipher.data.length;
            if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
                ((op->sym->m_dst == NULL) ||
                 rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
                if (is_proto_ipsec(ses)) {
                    cf = build_proto(op, ses);
                } else if (is_proto_pdcp(ses)) {
                    cf = build_proto(op, ses);
                } else if (is_auth_only(ses)) {
                    cf = build_auth_only(op, ses);
                } else if (is_cipher_only(ses)) {
                    cf = build_cipher_only(op, ses);
                } else if (is_aead(ses)) {
                    cf = build_cipher_auth_gcm(op, ses);
                    auth_only_len = ses->auth_only_len;
                } else if (is_auth_cipher(ses)) {
                    cf = build_cipher_auth(op, ses);
                    DPAA_SEC_DP_ERR("not supported ops");
                    frames_to_send = loop;

                if (is_proto_pdcp(ses) || is_proto_ipsec(ses)) {
                    cf = build_proto_sg(op, ses);
                } else if (is_auth_only(ses)) {
                    cf = build_auth_only_sg(op, ses);
                } else if (is_cipher_only(ses)) {
                    cf = build_cipher_only_sg(op, ses);
                } else if (is_aead(ses)) {
                    cf = build_cipher_auth_gcm_sg(op, ses);
                    auth_only_len = ses->auth_only_len;
                } else if (is_auth_cipher(ses)) {
                    cf = build_cipher_auth_sg(op, ses);
                    DPAA_SEC_DP_ERR("not supported ops");
                    frames_to_send = loop;

            if (unlikely(!cf)) {
                frames_to_send = loop;

            inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
            fd->opaque_addr = 0;
            qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
            fd->_format1 = qm_fd_compound;
            fd->length29 = 2 * sizeof(struct qm_sg_entry);
            /* Auth_only_len is set as 0 in descriptor and it is
             * overwritten here in the fd.cmd which will update
                fd->cmd = 0x80000000 | auth_only_len;

            /* In case of PDCP, per packet HFN is stored in
             * mbuf priv after sym_op.
            if (is_proto_pdcp(ses) && ses->pdcp.hfn_ovd) {
                fd->cmd = 0x80000000 |
                    *((uint32_t *)((uint8_t *)op +
                    ses->pdcp.hfn_ovd_offset));
                DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u,%u\n",
                    *((uint32_t *)((uint8_t *)op +
                    ses->pdcp.hfn_ovd_offset)),
                    is_proto_pdcp(ses));

        while (loop < frames_to_send) {
            loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                    &flags[loop], frames_to_send - loop);
        nb_ops -= frames_to_send;
        num_tx += frames_to_send;

    dpaa_qp->tx_pkts += num_tx;
    dpaa_qp->tx_errs += nb_ops - num_tx;

dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
    struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

    num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

    dpaa_qp->rx_pkts += num_rx;
    dpaa_qp->rx_errs += nb_ops - num_rx;

    DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

/** Release queue pair */
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
    struct dpaa_sec_dev_private *internals;
    struct dpaa_sec_qp *qp = NULL;

    PMD_INIT_FUNC_TRACE();

    DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

    internals = dev->data->dev_private;
    if (qp_id >= internals->max_nb_queue_pairs) {
        DPAA_SEC_ERR("Max supported qpid %d",
                internals->max_nb_queue_pairs);

    qp = &internals->qps[qp_id];
    rte_mempool_free(qp->ctx_pool);
    qp->internals = NULL;
    dev->data->queue_pairs[qp_id] = NULL;

/** Setup a queue pair */
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
        __rte_unused int socket_id)
    struct dpaa_sec_dev_private *internals;
    struct dpaa_sec_qp *qp = NULL;

    DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

    internals = dev->data->dev_private;
    if (qp_id >= internals->max_nb_queue_pairs) {
        DPAA_SEC_ERR("Max supported qpid %d",
                internals->max_nb_queue_pairs);

    qp = &internals->qps[qp_id];
    qp->internals = internals;
    snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
            dev->data->dev_id, qp_id);
    if (!qp->ctx_pool) {
        qp->ctx_pool = rte_mempool_create((const char *)str,
                CTX_POOL_CACHE_SIZE, 0,
                NULL, NULL, NULL, NULL,
        if (!qp->ctx_pool) {
            DPAA_SEC_ERR("%s create failed\n", str);
        DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
                dev->data->dev_id, qp_id);
    dev->data->queue_pairs[qp_id] = qp;

/** Return the number of allocated queue pairs */
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
    PMD_INIT_FUNC_TRACE();

    return dev->data->nb_queue_pairs;

/** Returns the size of session structure */
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
    PMD_INIT_FUNC_TRACE();

    return sizeof(dpaa_sec_session);

dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
        struct rte_crypto_sym_xform *xform,
        dpaa_sec_session *session)
    session->cipher_alg = xform->cipher.algo;
    session->iv.length = xform->cipher.iv.length;
    session->iv.offset = xform->cipher.iv.offset;
    session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
        DPAA_SEC_ERR("No Memory for cipher key");
    session->cipher_key.length = xform->cipher.key.length;

    memcpy(session->cipher_key.data, xform->cipher.key.data,
            xform->cipher.key.length);
    session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?

dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
        struct rte_crypto_sym_xform *xform,
        dpaa_sec_session *session)
    session->auth_alg = xform->auth.algo;
    session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
        DPAA_SEC_ERR("No Memory for auth key");
    session->auth_key.length = xform->auth.key.length;
    session->digest_length = xform->auth.digest_length;

    memcpy(session->auth_key.data, xform->auth.key.data,
            xform->auth.key.length);
    session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?

dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
        struct rte_crypto_sym_xform *xform,
        dpaa_sec_session *session)
    session->aead_alg = xform->aead.algo;
    session->iv.length = xform->aead.iv.length;
    session->iv.offset = xform->aead.iv.offset;
    session->auth_only_len = xform->aead.aad_length;
    session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
        DPAA_SEC_ERR("No Memory for aead key\n");
    session->aead_key.length = xform->aead.key.length;
    session->digest_length = xform->aead.digest_length;

    memcpy(session->aead_key.data, xform->aead.key.data,
            xform->aead.key.length);
    session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
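/* Pick the first free pre-created RX (in) queue of the device; the
 * caller must hold the device lock.
 */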
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
    for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
        if (qi->inq_attach[i] == 0) {
            qi->inq_attach[i] = 1;
    DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
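/* Retire the FQ and mark the RX queue as free again. */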
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
    for (i = 0; i < qi->max_nb_sessions; i++) {
        if (&qi->inq[i] == fq) {
            qman_retire_fq(fq, NULL);
            qi->inq_attach[i] = 0;
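/* Bind a session to a queue pair: prepare the shared descriptor and
 * schedule the session RX queue towards CAAM.
 */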
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
    sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
    ret = dpaa_sec_prep_cdb(sess);
        DPAA_SEC_ERR("Unable to prepare sec cdb");
    if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
        ret = rte_dpaa_portal_init((void *)0);
            DPAA_SEC_ERR("Failure in affining portal");
    ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
            dpaa_mem_vtop(&sess->cdb),
            qman_fq_fqid(&qp->outq));
        DPAA_SEC_ERR("Unable to init sec queue");

dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform, void *sess)
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;
    dpaa_sec_session *session = sess;

    PMD_INIT_FUNC_TRACE();

    if (unlikely(sess == NULL)) {
        DPAA_SEC_ERR("invalid session struct");
    memset(session, 0, sizeof(dpaa_sec_session));

    /* Default IV length = 0 */
    session->iv.length = 0;

    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;
        dpaa_sec_cipher_init(dev, xform, session);

    /* Authentication Only */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next == NULL) {
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
        dpaa_sec_auth_init(dev, xform, session);

    /* Cipher then Authenticate */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
        if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
            dpaa_sec_cipher_init(dev, xform, session);
            dpaa_sec_auth_init(dev, xform->next, session);
            DPAA_SEC_ERR("Not supported: Auth then Cipher");

    /* Authenticate then Cipher */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
           xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
        if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
            dpaa_sec_auth_init(dev, xform, session);
            dpaa_sec_cipher_init(dev, xform->next, session);
            DPAA_SEC_ERR("Not supported: Auth then Cipher");
    /* AEAD operation for AES-GCM kind of algorithms */
    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
           xform->next == NULL) {
        dpaa_sec_aead_init(dev, xform, session);

        DPAA_SEC_ERR("Invalid crypto type");

    rte_spinlock_lock(&internals->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(internals);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&internals->lock);
    rte_spinlock_unlock(&internals->lock);

    rte_free(session->cipher_key.data);
    rte_free(session->auth_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));

dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool)
    void *sess_private_data;

    PMD_INIT_FUNC_TRACE();

    if (rte_mempool_get(mempool, &sess_private_data)) {
        DPAA_SEC_ERR("Couldn't get object from session mempool");

    ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
        DPAA_SEC_ERR("failed to configure session parameters");

        /* Return session to mempool */
        rte_mempool_put(mempool, sess_private_data);

    set_sym_session_private_data(sess, dev->driver_id,
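/* Detach the session RX queues and scrub key material before returning
 * the session object to its mempool.
 */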
free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
    struct dpaa_sec_dev_private *qi = dev->data->dev_private;
    struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);

    for (i = 0; i < MAX_DPAA_CORES; i++) {
            dpaa_sec_detach_rxq(qi, s->inq[i]);

    rte_free(s->cipher_key.data);
    rte_free(s->auth_key.data);
    memset(s, 0, sizeof(dpaa_sec_session));
    rte_mempool_put(sess_mp, (void *)s);

/** Clear the memory of session so it doesn't leave key material behind */
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
    PMD_INIT_FUNC_TRACE();
    uint8_t index = dev->driver_id;
    void *sess_priv = get_sym_session_private_data(sess, index);
    dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        free_session_memory(dev, s);
        set_sym_session_private_data(sess, index, NULL);
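/* Populate a dpaa_sec_session from an rte_security IPsec session
 * configuration: copy keys, build the tunnel header for egress and the
 * encap/decap PDBs, then attach per-core RX queues.
 */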
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
        struct rte_security_session_conf *conf,
    struct dpaa_sec_dev_private *internals = dev->data->dev_private;
    struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;
    dpaa_sec_session *session = (dpaa_sec_session *)sess;

    PMD_INIT_FUNC_TRACE();

    memset(session, 0, sizeof(dpaa_sec_session));
    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        cipher_xform = &conf->crypto_xform->cipher;
        if (conf->crypto_xform->next)
            auth_xform = &conf->crypto_xform->next->auth;
        auth_xform = &conf->crypto_xform->auth;
        if (conf->crypto_xform->next)
            cipher_xform = &conf->crypto_xform->next->cipher;
    session->proto_alg = conf->protocol;

    if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
        session->cipher_key.data = rte_zmalloc(NULL,
                cipher_xform->key.length,
                RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL &&
                cipher_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for cipher key");
        memcpy(session->cipher_key.data, cipher_xform->key.data,
                cipher_xform->key.length);
        session->cipher_key.length = cipher_xform->key.length;

        switch (cipher_xform->algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
        case RTE_CRYPTO_CIPHER_3DES_CBC:
        case RTE_CRYPTO_CIPHER_AES_CTR:
            DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                    cipher_xform->algo);
        session->cipher_alg = cipher_xform->algo;
        session->cipher_key.data = NULL;
        session->cipher_key.length = 0;
        session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;

    if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
        session->auth_key.data = rte_zmalloc(NULL,
                auth_xform->key.length,
                RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL &&
                auth_xform->key.length > 0) {
            DPAA_SEC_ERR("No Memory for auth key");
            rte_free(session->cipher_key.data);
        memcpy(session->auth_key.data, auth_xform->key.data,
                auth_xform->key.length);
        session->auth_key.length = auth_xform->key.length;

        switch (auth_xform->algo) {
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
        case RTE_CRYPTO_AUTH_MD5_HMAC:
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
        case RTE_CRYPTO_AUTH_AES_CMAC:
            DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
        session->auth_alg = auth_xform->algo;
        session->auth_key.data = NULL;
        session->auth_key.length = 0;
        session->auth_alg = RTE_CRYPTO_AUTH_NULL;

    if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
        if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_v = IPVERSION;
            session->ip4_hdr.ip_hl = 5;
            session->ip4_hdr.ip_len = rte_cpu_to_be_16(
                    sizeof(session->ip4_hdr));
            session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
            session->ip4_hdr.ip_id = 0;
            session->ip4_hdr.ip_off = 0;
            session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
            session->ip4_hdr.ip_p = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            session->ip4_hdr.ip_sum = 0;
            session->ip4_hdr.ip_src =
                    ipsec_xform->tunnel.ipv4.src_ip;
            session->ip4_hdr.ip_dst =
                    ipsec_xform->tunnel.ipv4.dst_ip;
            session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
                    (void *)&session->ip4_hdr,
            session->encap_pdb.ip_hdr_len = sizeof(struct ip);
        } else if (ipsec_xform->tunnel.type ==
                RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
            memset(&session->encap_pdb, 0,
                sizeof(struct ipsec_encap_pdb) +
                sizeof(session->ip6_hdr));
            session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
                    DPAA_IPv6_DEFAULT_VTC_FLOW |
                    ((ipsec_xform->tunnel.ipv6.dscp <<
                        RTE_IPV6_HDR_TC_SHIFT) &
                        RTE_IPV6_HDR_TC_MASK) |
                    ((ipsec_xform->tunnel.ipv6.flabel <<
                        RTE_IPV6_HDR_FL_SHIFT) &
                        RTE_IPV6_HDR_FL_MASK));
            /* Payload length will be updated by HW */
            session->ip6_hdr.payload_len = 0;
            session->ip6_hdr.hop_limits =
                    ipsec_xform->tunnel.ipv6.hlimit;
            session->ip6_hdr.proto = (ipsec_xform->proto ==
                    RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
                    IPPROTO_ESP : IPPROTO_AH;
            memcpy(&session->ip6_hdr.src_addr,
                    &ipsec_xform->tunnel.ipv6.src_addr, 16);
            memcpy(&session->ip6_hdr.dst_addr,
                    &ipsec_xform->tunnel.ipv6.dst_addr, 16);
            session->encap_pdb.ip_hdr_len =
                    sizeof(struct rte_ipv6_hdr);
        session->encap_pdb.options =
                (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
                PDBOPTS_ESP_OIHI_PDB_INL |
                PDBHMO_ESP_ENCAP_DTTL |
        if (ipsec_xform->options.esn)
            session->encap_pdb.options |= PDBOPTS_ESP_ESN;
        session->encap_pdb.spi = ipsec_xform->spi;
        session->dir = DIR_ENC;
    } else if (ipsec_xform->direction ==
            RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
        memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
        if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
            session->decap_pdb.options = sizeof(struct ip) << 16;
            session->decap_pdb.options =
                    sizeof(struct rte_ipv6_hdr) << 16;
        if (ipsec_xform->options.esn)
            session->decap_pdb.options |= PDBOPTS_ESP_ESN;
        session->dir = DIR_DEC;

    rte_spinlock_lock(&internals->lock);
    for (i = 0; i < MAX_DPAA_CORES; i++) {
        session->inq[i] = dpaa_sec_attach_rxq(internals);
        if (session->inq[i] == NULL) {
            DPAA_SEC_ERR("unable to attach sec queue");
            rte_spinlock_unlock(&internals->lock);
    rte_spinlock_unlock(&internals->lock);

    rte_free(session->auth_key.data);
    rte_free(session->cipher_key.data);
    memset(session, 0, sizeof(dpaa_sec_session));
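/* Populate a dpaa_sec_session from an rte_security PDCP session
 * configuration: copy keys, record the PDCP parameters used when the
 * shared descriptor is built, then attach per-core RX queues.
 */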

static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			goto out;
		}
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* The per-packet HFN override offset is carried in the cipher IV
	 * offset; guard the auth-only case, where no cipher xform exists.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
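
/*
 * Usage sketch for the PDCP path above (not driver code): a control-plane
 * session with 12-bit sequence numbers. `sec_ctx`, `sess_mp` and
 * `cipher_then_auth_chain` are hypothetical placeholders.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.bearer = 0x3,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_then_auth_chain,
 *	};
 *	struct rte_security_session *pdcp_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 */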

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* return the object taken above so it is not leaked */
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
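
/*
 * Lifecycle sketch tying the two entry points above together (not driver
 * code; `dev_id`, `conf` and `sess_mp` are hypothetical):
 *
 *	struct rte_security_ctx *sec_ctx =
 *		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *s =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 *	... attach `s` to each crypto op with rte_security_attach_session(),
 *	    then enqueue/dequeue bursts as usual ...
 *
 *	rte_security_session_destroy(sec_ctx, s);
 */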

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
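
/*
 * Query sketch (not driver code; `dev_id` is a hypothetical identifier of a
 * probed dpaa_sec device):
 *
 *	struct rte_cryptodev_info info;
 *
 *	rte_cryptodev_info_get(dev_id, &info);
 *	printf("%u queue pairs, %u sessions max\n",
 *	       info.max_nb_queue_pairs, info.sym.max_nb_sessions);
 */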

static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	/* Defer consumption: the held DQRR entry is released only after the
	 * application finishes processing the atomic event.
	 */
	return qman_cb_dqrr_defer;
}

int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * configuration with HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}

int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}
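
/*
 * Event-path sketch (not driver code): the attach/detach hooks above are
 * driven by the DPAA event device through the event crypto adapter. An
 * application typically only does something like the following; all IDs
 * (`adapter_id`, `evdev_id`, `cryptodev_id`, `qp_id`) and `port_conf` are
 * hypothetical.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *			RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cryptodev_id,
 *			qp_id, &ev);
 */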

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.sym_session_get_size = dpaa_sec_sym_session_get_size,
	.sym_session_configure = dpaa_sec_sym_session_configure,
	.sym_session_clear = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}
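
/*
 * The logtype registered above defaults to NOTICE. More verbose driver
 * output can be requested from the EAL command line at run time, e.g.
 * (application name and core list are examples only):
 *
 *	./app -l 0-1 --log-level=pmd.crypto.dpaa,8
 *
 * where 8 corresponds to RTE_LOG_DEBUG.
 */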