/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;
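
/*
 * Per-lcore staging area used by the DQRR dequeue callback: completed
 * crypto ops are collected here, up to DPAA_SEC_BURST at a time, before
 * being handed back to the caller of the dequeue burst.
 */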
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence calling it 4 times
	 * clears all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, and memset is costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
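	/*
	 * Cache the virtual-to-IOVA delta of this ctx object so that hot-path
	 * translations of addresses inside the ctx (see dpaa_mem_vtop_ctx())
	 * reduce to a plain subtraction instead of a memseg lookup.
	 */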
	ctx->vtop_offset = (size_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *ms;

	ms = rte_mem_virt2memseg(vaddr, NULL);
	if (ms)
		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return 0;
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (size_t)vaddr - ctx->vtop_offset;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest chan as caam chan so that
 * all the packets in this queue can be dispatched to caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
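	/*
	 * CONTEXT_A carries the address of the session's shared descriptor
	 * (the CDB) that CAAM executes for every frame on this FQ, and
	 * CONTEXT_B names the FQID on which SEC enqueues the result.
	 */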
	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;
	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}

/* something is put into in_fq and caam puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
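	/*
	 * For protocol (IPsec) offload sessions SEC changes the frame length
	 * (encap adds headers, decap strips them), so refresh the mbuf
	 * lengths from the output S/G entry.
	 */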
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	struct qm_mcc_initfq opts;
	uint32_t flags;
	int ret;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (ret) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (ret) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return 0;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
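/*
 * Map the session's algorithms onto CAAM selectors: IPsec offload
 * sessions use the protocol-specific opcodes (OP_PCL_IPSEC_*), while
 * plain crypto sessions use the raw algorithm selectors (OP_ALG_ALGSEL_*).
 */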
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}

		alginfo.key = (size_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (size_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (size_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);
		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (size_t)dpaa_mem_vtop(
						(void *)(size_t)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;
		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}
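	/* The CDB header words are byte-swapped to the big-endian layout
	 * the SEC block parses; idlen records the length of the shared
	 * descriptor just constructed.
	 */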
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

/* qp is lockless, should be accessed by only one thread */
static uint16_t
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int ret;
	struct qm_dqrr_entry *dq;

	fq = &qp->outq;
	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			   DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
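	/* qman_set_vdq() posts a volatile dequeue command for up to nb_ops
	 * frames; the loop below drains DQRR entries until the VDQCR state
	 * on the FQ clears.
	 */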
	if (ret)
		return 0;

	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;
		struct rte_crypto_op *op;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] is for input
		 */
		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		op = ctx->op;
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			struct qm_sg_entry *sg_out;
			uint32_t len;

			sg_out = &job->sg[0];
			hw_sg_to_cpu(sg_out);
			len = sg_out->length;
			op->sym->m_src->pkt_len = len;
			op->sym->m_src->data_len = len;
		}
		if (!ctx->fd_status) {
			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		ops[pkts++] = op;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
	return pkts;
}

static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	phys_addr_t start_addr;
	uint8_t *old_digest, extra_segs;

	if (is_decode(ses))
		extra_segs = 3;
	else
		extra_segs = 2;

	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}
	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	/* output */
	out_sg = &cf->sg[0];
	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
	out_sg->length = ses->digest_length;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	/* need to extend the input to a compound frame */
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->auth.data.length;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

	/* 1st seg */
	sg = in_sg + 1;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (is_decode(ses)) {
		/* Digest verification case */
		cpu_to_hw_sg(sg);
		sg++;
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		in_sg->length += ses->digest_length;
	} else {
		/* Digest calculation case */
		sg->length -= ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);
	cpu_to_hw_sg(in_sg);

	return cf;
}

/*
 * packet looks like:
 *	|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 3;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = sym->cipher.data.length;
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = sym->cipher.data.length + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->cipher.data.offset;
	sg->offset = sym->cipher.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (ses->auth_only_len)
		req_segs++;

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->aead.data.length + ses->auth_only_len
						+ ses->digest_length;
	else
		out_sg->length = sym->aead.data.length + ses->auth_only_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset +
		     ses->auth_only_len;
	sg->offset = sym->aead.data.offset - ses->auth_only_len;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->aead.data.length
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + sym->aead.data.length
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->aead.data.offset;
	sg->offset = sym->aead.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
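	/* For decryption the expected tag is staged in ctx->digest and
	 * appended to the input frame so that SEC verifies it in line.
	 */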
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
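	/* A compound frame describes the whole job: cf->sg[0] is the output
	 * S/G entry and cf->sg[1] the input one; the extension bit marks an
	 * entry as a pointer to a further S/G table and final terminates it.
	 */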
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			sg++;
		}

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			sg++;
		}

		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	struct rte_mbuf *mbuf;
	uint8_t req_segs;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	if (sym->m_dst) {
		mbuf = sym->m_dst;
		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
	} else {
		mbuf = sym->m_src;
		req_segs = mbuf->nb_segs * 2 + 4;
	}

	if (req_segs > MAX_SG_ENTRIES) {
		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
			   MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = sym->auth.data.length + ses->digest_length;
	else
		out_sg->length = sym->auth.data.length;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sg->length -= ses->digest_length;

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	mbuf = sym->m_src;
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	if (is_encode(ses))
		in_sg->length = ses->iv.length + sym->auth.data.length;
	else
		in_sg->length = ses->iv.length + sym->auth.data.length
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2nd seg */
	sg++;
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - sym->auth.data.offset;
	sg->offset = sym->auth.data.offset;

	/* Successive segs */
	mbuf = mbuf->next;
	while (mbuf) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	sg->length -= ses->digest_length;
	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
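	/* output: SEC writes the encap/decap result here. Span the whole
	 * remaining buffer room, since the final frame length is known only
	 * on completion and is written back to the mbuf at dequeue time.
	 */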
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct dpaa_sec_op_ctx *ctx;
	uint32_t auth_only_len;
	struct qman_fq *inq[DPAA_SEC_BURST];

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				 DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
			default:
				PMD_TX_LOG(ERR,
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}
			if (unlikely(!ses->qp || ses->qp != qp)) {
				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
					     ses->qp, qp);
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}

			auth_only_len = op->sym->auth.data.length -
					op->sym->cipher.data.length;
			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
				if (is_auth_only(ses)) {
					cf = build_auth_only(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth(op, ses);
				} else if (is_proto_ipsec(ses)) {
					cf = build_proto(op, ses);
				} else {
					PMD_TX_LOG(ERR, "not supported sec op");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				if (is_auth_only(ses)) {
					cf = build_auth_only_sg(op, ses);
				} else if (is_cipher_only(ses)) {
					cf = build_cipher_only_sg(op, ses);
				} else if (is_aead(ses)) {
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_only_len = ses->auth_only_len;
				} else if (is_auth_cipher(ses)) {
					cf = build_cipher_auth_sg(op, ses);
				} else {
					PMD_TX_LOG(ERR, "not supported sec op");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			fd = &fds[loop];
			inq[loop] = ses->inq;
			fd->opaque_addr = 0;
			fd->cmd = 0;
			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD register in SEC.
			 */
			if (auth_only_len)
				fd->cmd = 0x80000000 | auth_only_len;
		}
send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
						      frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
			  __rte_unused int socket_id,
			  __rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
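/* Bind a session to a queue pair: build the session's shared descriptor
 * (CDB), make sure this thread has an affined QMan portal, then schedule
 * the session's Rx FQ with the CDB as CONTEXT_A and results routed to
 * the qp's out queue.
 */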
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;
	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				 "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		       struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
	    cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
	    auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto out;
	}
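	/* Egress: build the encap PDB plus a template outer IPv4 header that
	 * SEC prepends on encryption; ingress: the decap PDB only carries the
	 * outer header length, so SEC knows how much to strip.
	 */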
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: IPPROTO_AH;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto out;
	}

	return 0;

out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
				  struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();

	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
		       struct rte_cryptodev_config *config __rte_unused)
{
	char str[20];
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	internals = dev->data->dev_private;
	sprintf(str, "ctx_pool_%d", dev->data->dev_id);
	if (!internals->ctx_pool) {
		internals->ctx_pool = rte_mempool_create((const char *)str,
					CTX_POOL_NUM_BUFS,
					CTX_POOL_BUF_SIZE,
					CTX_POOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					SOCKET_ID_ANY, 0);
		if (!internals->ctx_pool) {
			RTE_LOG(ERR, PMD, "%s create failed\n", str);
			return -ENOMEM;
		}
	} else
		RTE_LOG(INFO, PMD, "mempool already created for dev_id : %d\n",
			dev->data->dev_id);

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -ENOMEM;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	internals->ctx_pool = NULL;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	/* In case close has been called, internals->ctx_pool would be NULL */
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				       sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}
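	/* The SEC era read from the CAAM device-tree node selects the RTA
	 * instruction set used when constructing shared descriptors.
	 */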
	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);