/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017-2018 NXP
 *
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (retval != 0 || !ctx) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence calling it 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

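/*
 * Note (inferred from the code above): vtop_offset caches the
 * virtual-to-IOVA delta of the ctx object at allocation time. Since the
 * mempool element is physically contiguous, dpaa_mem_vtop_ctx() below can
 * translate any address inside the ctx (an SG entry, the digest scratch
 * area) with a single subtraction instead of a memseg lookup per field.
 */
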
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return 0;
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (size_t)vaddr - ctx->vtop_offset;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
			fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		DPAA_SEC_ERR("qman_init_fq failed %d", ret);

	return ret;
}

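/*
 * Summary of the mapping set up above, as read from the init code: the
 * session RX FQ is scheduled to the CAAM channel, its CONTEXT_A carries
 * the IOVA of the session's shared descriptor (hwdesc) that SEC executes
 * per frame, and CONTEXT_B carries the FQID on which SEC enqueues the
 * processed result (fqid_out).
 */
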
/* something is put into in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

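/*
 * Completions are batched: the callback stashes finished ops in the
 * per-thread dpaa_sec_ops array and, once DPAA_SEC_BURST entries are
 * collected, returns qman_cb_dqrr_defer so the remaining DQRR entries
 * are left for the next poll.
 */
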
/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	int32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported aead alg");
			return -ENOTSUP;
		}
		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported cipher alg");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			DPAA_SEC_ERR("not supported auth alg");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}

	if (shared_desc_len < 0) {
		DPAA_SEC_ERR("error in preparing command block");
		return shared_desc_len;
	}

	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

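/*
 * Note (a sketch of intent, inferred from the code above): SEC parses the
 * shared-descriptor header words in big-endian layout, hence the single
 * rte_cpu_to_be_32() conversion of sh_hdr at session setup; byte order of
 * the descriptor body itself is handled by the RTA cnstr_shdsc_*
 * constructors through their "swap" argument.
 */
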
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int ret;
	struct qm_dqrr_entry *dq;

	fq = &qp->outq;
	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			       DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output,
		 * sg[1] is for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}

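/*
 * The dequeue loop above uses QMan's volatile dequeue mode: qman_set_vdq()
 * issues a VDQCR for at most DPAA_MAX_DEQUEUE_NUM_FRAMES frames and
 * QMAN_FQ_STATE_VDQCR remains set on the FQ until that command has fully
 * drained, which is what bounds the do/while loop.
 */
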
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

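/*
 * Layout used by all the build_* helpers (inferred from this file): each
 * job is a two-entry compound frame where cf->sg[0] describes the output
 * and cf->sg[1] the input. When a side needs several buffers, the entry
 * is marked as an extension and points to a scatter-gather table built
 * in-place starting at cf->sg[2] of the same ctx.
 */
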
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
					ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

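/*
 * Note on the two GCM builders above: the AAD (sym->aead.aad.data) goes in
 * as its own input SG entry after the IV, and the output entry is pulled
 * back by auth_only_len so ciphertext lands at the expected offset. The
 * digest is either produced into a trailing output entry (encrypt) or
 * copied into ctx->digest and appended to the input for hardware
 * verification (decrypt).
 */
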
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

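/*
 * For IPsec protocol offload the whole mbuf is handed to SEC: pkt_len in,
 * the full available buffer out, with the encap/decap descriptor deciding
 * the final frame length. The dequeue path then rewrites m_src's
 * pkt_len/data_len from sg[0].length for security sessions.
 */
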
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct dpaa_sec_op_ctx *ctx;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
						ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			auth_only_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			fd->cmd = 0x80000000 | auth_only_len;
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

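/*
 * Note on fd->cmd above (inferred from the in-line comment): bit 31
 * appears to enable the DPOVRD override and the low bits carry the
 * per-packet auth-only length that dpaa_sec_prep_cdb() deliberately left
 * as 0 in the shared descriptor, so one descriptor serves packets with
 * varying auth/cipher offsets.
 */
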
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		DPAA_SEC_ERR("Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		DPAA_SEC_ERR("No Memory for aead key");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	DPAA_SEC_WARN("All session RX queues in use %x", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return -1;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}

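/*
 * Each session owns a CAAM-scheduled RX FQ (claimed via
 * dpaa_sec_attach_rxq) whose CONTEXT_A already points at the session CDB;
 * binding the session to a qp therefore only requires re-initializing
 * that FQ with the qp's outq FQID as CONTEXT_B, as done above.
 */
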
static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for cipher key");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		DPAA_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			     auth_xform->algo);
		goto out;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			     auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			     cipher_xform->algo);
		goto out;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			     cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: IPPROTO_AH;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	}

	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		DPAA_SEC_ERR("unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}

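/*
 * Note on the IPsec PDB setup above (hedged reading of the flags): the
 * prebuilt outer IPv4 header is inlined into the encap PDB
 * (PDBOPTS_ESP_OIHI_PDB_INL) so SEC emits it on every egress packet,
 * PDBHMO_ESP_ENCAP_DTTL requests TTL decrement, and on ingress the decap
 * options word (sizeof(struct ip) << 16) tells SEC how many outer-header
 * bytes to strip.
 */
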
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		/* clear the session private data, not the (smaller)
		 * rte_security_session handle itself
		 */
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
{
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_NUM_BUFS,
					CTX_POOL_BUF_SIZE,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					SOCKET_ID_ANY, 0);
		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		DPAA_SEC_INFO("mempool already created for dev_id : %d",
			      dev->data->dev_id);

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log);
static void
dpaa_sec_init_log(void)
{
	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
	if (dpaa_logtype_sec >= 0)
		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}