1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2017-2018 NXP
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
37 #include <rte_dpaa_bus.h>
39 #include <dpaa_sec_log.h>
41 enum rta_sec_era rta_sec_era;
45 static uint8_t cryptodev_driver_id;
47 static __thread struct rte_crypto_op **dpaa_sec_ops;
48 static __thread int dpaa_sec_op_nb;
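/*
 * Per-thread scratch for the DQRR callback below: completed crypto ops
 * are staged here, up to DPAA_SEC_BURST at a time, before being handed
 * back to the caller. Thread-local so each lcore polling a portal has
 * its own staging buffer.
 */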
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
56 if (!ctx->fd_status) {
57 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58 } else {
59 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
61 }
63 /* report op status to sym->op and then free the ctx memory */
64 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
67 static inline struct dpaa_sec_op_ctx *
68 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
70 struct dpaa_sec_op_ctx *ctx;
73 retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
74 if (!ctx || retval) {
75 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
76 return NULL;
77 }
79 * Clear SG memory. There are 16 SG entries of 16 bytes each.
80 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
81 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
82 * each packet, and memset() would be costlier than dcbz_64().
84 dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
85 dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
86 dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
87 dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
89 ctx->ctx_pool = ses->ctx_pool;
90 ctx->vtop_offset = (size_t) ctx
91 - rte_mempool_virt2iova(ctx);
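/*
 * Note: vtop_offset records the virtual-to-IOVA delta of this ctx
 * object; keeping it with the job presumably allows addresses within
 * the same mempool object to be translated by simple arithmetic
 * instead of a rte_mempool_virt2iova() lookup per SG entry.
 */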
96 static inline rte_iova_t
97 dpaa_mem_vtop(void *vaddr)
99 const struct rte_memseg *ms;
101 ms = rte_mem_virt2memseg(vaddr, NULL);
103 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
108 dpaa_mem_ptov(rte_iova_t paddr)
110 return rte_mem_iova2virt(paddr);
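/*
 * dpaa_mem_vtop()/dpaa_mem_ptov() translate between virtual addresses
 * and the IOVAs the SEC hardware sees: every pointer placed into an
 * FD or SG entry goes through vtop, and every address read back from
 * hardware goes through ptov, e.g.
 *   qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));   on enqueue
 *   job = dpaa_mem_ptov(qm_fd_addr_get64(fd));     on completion
 */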
114 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
116 const struct qm_mr_entry *msg)
118 DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
119 fq->fqid, msg->ern.rc, msg->ern.seqnum);
122 /* Initialize the queue with dest chan as the CAAM chan so that
123 * all the packets in this queue can be dispatched to CAAM.
126 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
129 struct qm_mcc_initfq fq_opts;
133 /* Clear FQ options */
134 memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
136 flags = QMAN_INITFQ_FLAG_SCHED;
137 fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
138 QM_INITFQ_WE_CONTEXTB;
140 qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
141 fq_opts.fqd.context_b = fqid_out;
142 fq_opts.fqd.dest.channel = qm_channel_caam;
143 fq_opts.fqd.dest.wq = 0;
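/*
 * context_a carries the IOVA of the session's shared descriptor (the
 * CDB) and context_b the FQID on which SEC should enqueue results, so
 * the hardware knows both what to execute and where to reply.
 */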
145 fq_in->cb.ern = ern_sec_fq_handler;
147 DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
149 ret = qman_init_fq(fq_in, flags, &fq_opts);
150 if (unlikely(ret != 0))
151 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
156 /* frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
157 static enum qman_cb_dqrr_result
158 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
159 struct qman_fq *fq __always_unused,
160 const struct qm_dqrr_entry *dqrr)
162 const struct qm_fd *fd;
163 struct dpaa_sec_job *job;
164 struct dpaa_sec_op_ctx *ctx;
166 if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
167 return qman_cb_dqrr_defer;
169 if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
170 return qman_cb_dqrr_consume;
172 fd = &dqrr->fd;
173 /* sg is embedded in an op ctx,
174 * sg[0] is for output
175 * sg[1] for input
176 */
177 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
179 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
180 ctx->fd_status = fd->status;
181 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
182 struct qm_sg_entry *sg_out;
185 sg_out = &job->sg[0];
186 hw_sg_to_cpu(sg_out);
187 len = sg_out->length;
188 ctx->op->sym->m_src->pkt_len = len;
189 ctx->op->sym->m_src->data_len = len;
190 }
191 dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
192 dpaa_sec_op_ending(ctx);
194 return qman_cb_dqrr_consume;
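/*
 * Note: returning qman_cb_dqrr_defer (burst already full) leaves the
 * entry on the DQRR ring to be redelivered on the next poll, while
 * qman_cb_dqrr_consume releases it once the op has been staged.
 */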
197 /* CAAM results are put into this queue */
199 dpaa_sec_init_tx(struct qman_fq *fq)
202 struct qm_mcc_initfq opts;
205 flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
206 QMAN_FQ_FLAG_DYNAMIC_FQID;
208 ret = qman_create_fq(0, flags, fq);
210 DPAA_SEC_ERR("qman_create_fq failed");
214 memset(&opts, 0, sizeof(opts));
215 opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
216 QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
218 /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
220 fq->cb.dqrr = dqrr_out_fq_cb_rx;
221 fq->cb.ern = ern_sec_fq_handler;
223 ret = qman_init_fq(fq, 0, &opts);
225 DPAA_SEC_ERR("unable to init caam source fq!");
232 static inline int is_cipher_only(dpaa_sec_session *ses)
234 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
235 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
238 static inline int is_auth_only(dpaa_sec_session *ses)
240 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
241 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
244 static inline int is_aead(dpaa_sec_session *ses)
246 return ((ses->cipher_alg == 0) &&
247 (ses->auth_alg == 0) &&
248 (ses->aead_alg != 0));
251 static inline int is_auth_cipher(dpaa_sec_session *ses)
253 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
254 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
255 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
258 static inline int is_proto_ipsec(dpaa_sec_session *ses)
260 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
263 static inline int is_encode(dpaa_sec_session *ses)
265 return ses->dir == DIR_ENC;
268 static inline int is_decode(dpaa_sec_session *ses)
270 return ses->dir == DIR_DEC;
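/*
 * The predicates above classify a session once at setup time; the
 * enqueue path later selects the matching descriptor-build routine
 * (auth-only, cipher-only, AEAD, chained cipher+auth, or IPsec
 * protocol offload) based on them.
 */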
274 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
276 switch (ses->auth_alg) {
277 case RTE_CRYPTO_AUTH_NULL:
279 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
280 OP_PCL_IPSEC_HMAC_NULL : 0;
281 ses->digest_length = 0;
283 case RTE_CRYPTO_AUTH_MD5_HMAC:
285 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
286 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
287 alginfo_a->algmode = OP_ALG_AAI_HMAC;
289 case RTE_CRYPTO_AUTH_SHA1_HMAC:
291 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
292 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
293 alginfo_a->algmode = OP_ALG_AAI_HMAC;
295 case RTE_CRYPTO_AUTH_SHA224_HMAC:
297 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
298 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
299 alginfo_a->algmode = OP_ALG_AAI_HMAC;
301 case RTE_CRYPTO_AUTH_SHA256_HMAC:
303 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
304 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
305 alginfo_a->algmode = OP_ALG_AAI_HMAC;
307 case RTE_CRYPTO_AUTH_SHA384_HMAC:
309 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
310 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
311 alginfo_a->algmode = OP_ALG_AAI_HMAC;
313 case RTE_CRYPTO_AUTH_SHA512_HMAC:
315 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
316 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
317 alginfo_a->algmode = OP_ALG_AAI_HMAC;
320 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
325 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
327 switch (ses->cipher_alg) {
328 case RTE_CRYPTO_CIPHER_NULL:
330 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
331 OP_PCL_IPSEC_NULL : 0;
333 case RTE_CRYPTO_CIPHER_AES_CBC:
335 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
336 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
337 alginfo_c->algmode = OP_ALG_AAI_CBC;
339 case RTE_CRYPTO_CIPHER_3DES_CBC:
341 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
342 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
343 alginfo_c->algmode = OP_ALG_AAI_CBC;
345 case RTE_CRYPTO_CIPHER_AES_CTR:
347 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
349 alginfo_c->algmode = OP_ALG_AAI_CTR;
352 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
357 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
359 switch (ses->aead_alg) {
360 case RTE_CRYPTO_AEAD_AES_GCM:
361 alginfo->algtype = OP_ALG_ALGSEL_AES;
362 alginfo->algmode = OP_ALG_AAI_GCM;
365 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
369 /* prepare ipsec proto command block of the session */
371 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
373 struct alginfo cipherdata = {0}, authdata = {0};
374 struct sec_cdb *cdb = &ses->cdb;
375 int32_t shared_desc_len = 0;
377 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
383 caam_cipher_alg(ses, &cipherdata);
384 if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
385 DPAA_SEC_ERR("not supported cipher alg");
389 cipherdata.key = (size_t)ses->cipher_key.data;
390 cipherdata.keylen = ses->cipher_key.length;
391 cipherdata.key_enc_flags = 0;
392 cipherdata.key_type = RTA_DATA_IMM;
394 caam_auth_alg(ses, &authdata);
395 if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
396 DPAA_SEC_ERR("not supported auth alg");
400 authdata.key = (size_t)ses->auth_key.data;
401 authdata.keylen = ses->auth_key.length;
402 authdata.key_enc_flags = 0;
403 authdata.key_type = RTA_DATA_IMM;
405 cdb->sh_desc[0] = cipherdata.keylen;
406 cdb->sh_desc[1] = authdata.keylen;
407 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
409 (unsigned int *)cdb->sh_desc,
410 &cdb->sh_desc[2], 2);
413 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
416 if (cdb->sh_desc[2] & 1)
417 cipherdata.key_type = RTA_DATA_IMM;
418 else {
419 cipherdata.key = (size_t)dpaa_mem_vtop(
420 (void *)(size_t)cipherdata.key);
421 cipherdata.key_type = RTA_DATA_PTR;
422 }
423 if (cdb->sh_desc[2] & (1<<1))
424 authdata.key_type = RTA_DATA_IMM;
425 else {
426 authdata.key = (size_t)dpaa_mem_vtop(
427 (void *)(size_t)authdata.key);
428 authdata.key_type = RTA_DATA_PTR;
429 }
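/*
 * rta_inline_query() decides, from the shared-descriptor space budget,
 * whether each key can be embedded immediately in the descriptor
 * (RTA_DATA_IMM) or must be referenced by IOVA (RTA_DATA_PTR): bit 0
 * of the result answers for the cipher key, bit 1 for the auth key.
 */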
434 if (ses->dir == DIR_ENC) {
435 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
436 cdb->sh_desc,
437 true, swap, SHR_SERIAL,
438 &ses->encap_pdb,
439 (uint8_t *)&ses->ip4_hdr,
440 &cipherdata, &authdata);
441 } else if (ses->dir == DIR_DEC) {
442 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
443 cdb->sh_desc,
444 true, swap, SHR_SERIAL,
445 &ses->decap_pdb,
446 &cipherdata, &authdata);
447 }
448 return shared_desc_len;
451 /* prepare command block of the session */
453 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
455 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
456 int32_t shared_desc_len = 0;
457 struct sec_cdb *cdb = &ses->cdb;
459 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
465 memset(cdb, 0, sizeof(struct sec_cdb));
467 if (is_proto_ipsec(ses)) {
468 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
469 } else if (is_cipher_only(ses)) {
470 caam_cipher_alg(ses, &alginfo_c);
471 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
472 DPAA_SEC_ERR("not supported cipher alg");
476 alginfo_c.key = (size_t)ses->cipher_key.data;
477 alginfo_c.keylen = ses->cipher_key.length;
478 alginfo_c.key_enc_flags = 0;
479 alginfo_c.key_type = RTA_DATA_IMM;
481 shared_desc_len = cnstr_shdsc_blkcipher(
487 } else if (is_auth_only(ses)) {
488 caam_auth_alg(ses, &alginfo_a);
489 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
490 DPAA_SEC_ERR("not supported auth alg");
494 alginfo_a.key = (size_t)ses->auth_key.data;
495 alginfo_a.keylen = ses->auth_key.length;
496 alginfo_a.key_enc_flags = 0;
497 alginfo_a.key_type = RTA_DATA_IMM;
499 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
503 } else if (is_aead(ses)) {
504 caam_aead_alg(ses, &alginfo);
505 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
506 DPAA_SEC_ERR("not supported aead alg");
509 alginfo.key = (size_t)ses->aead_key.data;
510 alginfo.keylen = ses->aead_key.length;
511 alginfo.key_enc_flags = 0;
512 alginfo.key_type = RTA_DATA_IMM;
514 if (ses->dir == DIR_ENC)
515 shared_desc_len = cnstr_shdsc_gcm_encap(
516 cdb->sh_desc, true, swap,
521 shared_desc_len = cnstr_shdsc_gcm_decap(
522 cdb->sh_desc, true, swap,
527 caam_cipher_alg(ses, &alginfo_c);
528 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
529 DPAA_SEC_ERR("not supported cipher alg");
533 alginfo_c.key = (size_t)ses->cipher_key.data;
534 alginfo_c.keylen = ses->cipher_key.length;
535 alginfo_c.key_enc_flags = 0;
536 alginfo_c.key_type = RTA_DATA_IMM;
538 caam_auth_alg(ses, &alginfo_a);
539 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
540 DPAA_SEC_ERR("not supported auth alg");
544 alginfo_a.key = (size_t)ses->auth_key.data;
545 alginfo_a.keylen = ses->auth_key.length;
546 alginfo_a.key_enc_flags = 0;
547 alginfo_a.key_type = RTA_DATA_IMM;
549 cdb->sh_desc[0] = alginfo_c.keylen;
550 cdb->sh_desc[1] = alginfo_a.keylen;
551 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
553 (unsigned int *)cdb->sh_desc,
554 &cdb->sh_desc[2], 2);
557 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
560 if (cdb->sh_desc[2] & 1)
561 alginfo_c.key_type = RTA_DATA_IMM;
562 else {
563 alginfo_c.key = (size_t)dpaa_mem_vtop(
564 (void *)(size_t)alginfo_c.key);
565 alginfo_c.key_type = RTA_DATA_PTR;
566 }
567 if (cdb->sh_desc[2] & (1<<1))
568 alginfo_a.key_type = RTA_DATA_IMM;
569 else {
570 alginfo_a.key = (size_t)dpaa_mem_vtop(
571 (void *)(size_t)alginfo_a.key);
572 alginfo_a.key_type = RTA_DATA_PTR;
573 }
577 /* auth_only_len is set as 0 here; it is overwritten in the fd
578 * for each packet.
580 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
581 true, swap, &alginfo_c, &alginfo_a,
583 ses->digest_length, ses->dir);
586 if (shared_desc_len < 0) {
587 DPAA_SEC_ERR("error in preparing command block");
588 return shared_desc_len;
591 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
592 cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
593 cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
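/*
 * Note: the CDB header words are converted to big-endian because SEC
 * parses the shared descriptor directly from memory in that byte
 * order; idlen is the descriptor length in 32-bit words.
 */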
598 /* qp is lockless, should be accessed by only one thread */
600 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
603 unsigned int pkts = 0;
604 int num_rx_bufs, ret;
605 struct qm_dqrr_entry *dq;
606 uint32_t vdqcr_flags = 0;
610 * For requests of fewer than four buffers, we provide the exact
611 * number of buffers and set the QM_VDQCR_EXACT flag.
612 * Without QM_VDQCR_EXACT, the portal can return up to two more
613 * buffers than requested, so in that case we request two less.
614 */
615 if (nb_ops < 4) {
616 vdqcr_flags = QM_VDQCR_EXACT;
617 num_rx_bufs = nb_ops;
618 } else
619 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
620 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
622 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
627 const struct qm_fd *fd;
628 struct dpaa_sec_job *job;
629 struct dpaa_sec_op_ctx *ctx;
630 struct rte_crypto_op *op;
632 dq = qman_dequeue(fq);
633 if (!dq)
634 continue;
636 fd = &dq->fd;
637 /* sg is embedded in an op ctx,
638 * sg[0] is for output
639 * sg[1] for input
640 */
641 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
643 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
644 ctx->fd_status = fd->status;
645 op = ctx->op;
646 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
647 struct qm_sg_entry *sg_out;
650 sg_out = &job->sg[0];
651 hw_sg_to_cpu(sg_out);
652 len = sg_out->length;
653 op->sym->m_src->pkt_len = len;
654 op->sym->m_src->data_len = len;
656 if (!ctx->fd_status) {
657 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
658 } else {
659 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
660 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
661 }
662 ops[pkts++] = op;
664 /* report op status to sym->op and then free the ctx memory */
665 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
667 qman_dqrr_consume(fq, dq);
668 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
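/*
 * The loop keeps draining the portal until the volatile dequeue
 * command completes (QMAN_FQ_STATE_VDQCR clears), so one call returns
 * at most the number of frames requested via qman_set_vdq().
 */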
673 static inline struct dpaa_sec_job *
674 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
676 struct rte_crypto_sym_op *sym = op->sym;
677 struct rte_mbuf *mbuf = sym->m_src;
678 struct dpaa_sec_job *cf;
679 struct dpaa_sec_op_ctx *ctx;
680 struct qm_sg_entry *sg, *out_sg, *in_sg;
681 phys_addr_t start_addr;
682 uint8_t *old_digest, extra_segs;
689 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
690 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
694 ctx = dpaa_sec_alloc_ctx(ses);
700 old_digest = ctx->digest;
704 qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
705 out_sg->length = ses->digest_length;
706 cpu_to_hw_sg(out_sg);
710 /* need to extend the input to a compound frame */
711 in_sg->extension = 1;
713 in_sg->length = sym->auth.data.length;
714 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
718 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
719 sg->length = mbuf->data_len - sym->auth.data.offset;
720 sg->offset = sym->auth.data.offset;
722 /* Successive segs */
727 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
728 sg->length = mbuf->data_len;
732 if (is_decode(ses)) {
733 /* Digest verification case */
736 rte_memcpy(old_digest, sym->auth.digest.data,
738 start_addr = dpaa_mem_vtop(old_digest);
739 qm_sg_entry_set64(sg, start_addr);
740 sg->length = ses->digest_length;
741 in_sg->length += ses->digest_length;
743 /* Digest calculation case */
744 sg->length -= ses->digest_length;
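/*
 * For verification the expected digest is copied aside into
 * ctx->digest and appended to the input SG list: SEC recomputes the
 * hash over the data and compares it with this trailing ICV,
 * signalling a mismatch through the FD status. For generation the
 * digest bytes are simply excluded from the input length.
 */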
755 * |<----data_len------->|
756 * |ip_header|ah_header|icv|payload|
761 static inline struct dpaa_sec_job *
762 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
764 struct rte_crypto_sym_op *sym = op->sym;
765 struct rte_mbuf *mbuf = sym->m_src;
766 struct dpaa_sec_job *cf;
767 struct dpaa_sec_op_ctx *ctx;
768 struct qm_sg_entry *sg;
769 rte_iova_t start_addr;
772 ctx = dpaa_sec_alloc_ctx(ses);
778 old_digest = ctx->digest;
780 start_addr = rte_pktmbuf_iova(mbuf);
783 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
784 sg->length = ses->digest_length;
789 if (is_decode(ses)) {
790 /* need to extend the input to a compound frame */
792 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
793 sg->length = sym->auth.data.length + ses->digest_length;
798 /* hash result or digest, save digest first */
799 rte_memcpy(old_digest, sym->auth.digest.data,
801 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
802 sg->length = sym->auth.data.length;
805 /* let the hardware verify the digest */
806 start_addr = dpaa_mem_vtop(old_digest);
808 qm_sg_entry_set64(sg, start_addr);
809 sg->length = ses->digest_length;
813 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
814 sg->length = sym->auth.data.length;
822 static inline struct dpaa_sec_job *
823 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
825 struct rte_crypto_sym_op *sym = op->sym;
826 struct dpaa_sec_job *cf;
827 struct dpaa_sec_op_ctx *ctx;
828 struct qm_sg_entry *sg, *out_sg, *in_sg;
829 struct rte_mbuf *mbuf;
831 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
836 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
837 } else {
838 mbuf = sym->m_src;
839 req_segs = mbuf->nb_segs * 2 + 3;
842 if (req_segs > MAX_SG_ENTRIES) {
843 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
848 ctx = dpaa_sec_alloc_ctx(ses);
857 out_sg->extension = 1;
858 out_sg->length = sym->cipher.data.length;
859 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
860 cpu_to_hw_sg(out_sg);
864 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
865 sg->length = mbuf->data_len - sym->cipher.data.offset;
866 sg->offset = sym->cipher.data.offset;
868 /* Successive segs */
873 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
874 sg->length = mbuf->data_len;
883 in_sg->extension = 1;
885 in_sg->length = sym->cipher.data.length + ses->iv.length;
888 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
892 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
893 sg->length = ses->iv.length;
898 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
899 sg->length = mbuf->data_len - sym->cipher.data.offset;
900 sg->offset = sym->cipher.data.offset;
902 /* Successive segs */
907 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
908 sg->length = mbuf->data_len;
917 static inline struct dpaa_sec_job *
918 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
920 struct rte_crypto_sym_op *sym = op->sym;
921 struct dpaa_sec_job *cf;
922 struct dpaa_sec_op_ctx *ctx;
923 struct qm_sg_entry *sg;
924 rte_iova_t src_start_addr, dst_start_addr;
925 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
928 ctx = dpaa_sec_alloc_ctx(ses);
935 src_start_addr = rte_pktmbuf_iova(sym->m_src);
938 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
940 dst_start_addr = src_start_addr;
944 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
945 sg->length = sym->cipher.data.length + ses->iv.length;
951 /* need to extend the input to a compound frame */
954 sg->length = sym->cipher.data.length + ses->iv.length;
955 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
959 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
960 sg->length = ses->iv.length;
964 qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
965 sg->length = sym->cipher.data.length;
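/*
 * cf->sg[0] is the output side of the compound frame and cf->sg[1]
 * (marked as an extension) the input side: the IV first, followed by
 * the cipher region of the source mbuf.
 */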
972 static inline struct dpaa_sec_job *
973 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
975 struct rte_crypto_sym_op *sym = op->sym;
976 struct dpaa_sec_job *cf;
977 struct dpaa_sec_op_ctx *ctx;
978 struct qm_sg_entry *sg, *out_sg, *in_sg;
979 struct rte_mbuf *mbuf;
981 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
986 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
987 } else {
988 mbuf = sym->m_src;
989 req_segs = mbuf->nb_segs * 2 + 4;
992 if (ses->auth_only_len)
993 req_segs++;
995 if (req_segs > MAX_SG_ENTRIES) {
996 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
1001 ctx = dpaa_sec_alloc_ctx(ses);
1008 rte_prefetch0(cf->sg);
1011 out_sg = &cf->sg[0];
1012 out_sg->extension = 1;
1013 if (is_encode(ses))
1014 out_sg->length = sym->aead.data.length + ses->auth_only_len
1015 + ses->digest_length;
1016 else
1017 out_sg->length = sym->aead.data.length + ses->auth_only_len;
1019 /* output sg entries */
1021 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1022 cpu_to_hw_sg(out_sg);
1025 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1026 sg->length = mbuf->data_len - sym->aead.data.offset +
1028 sg->offset = sym->aead.data.offset - ses->auth_only_len;
1030 /* Successive segs */
1035 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1036 sg->length = mbuf->data_len;
1039 sg->length -= ses->digest_length;
1041 if (is_encode(ses)) {
1043 /* set auth output */
1045 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1046 sg->length = ses->digest_length;
1054 in_sg->extension = 1;
1055 in_sg->final = 1;
1056 if (is_encode(ses))
1057 in_sg->length = ses->iv.length + sym->aead.data.length
1058 + ses->auth_only_len;
1059 else
1060 in_sg->length = ses->iv.length + sym->aead.data.length
1061 + ses->auth_only_len + ses->digest_length;
1063 /* input sg entries */
1065 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1066 cpu_to_hw_sg(in_sg);
1069 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1070 sg->length = ses->iv.length;
1073 /* 2nd seg auth only */
1074 if (ses->auth_only_len) {
1076 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1077 sg->length = ses->auth_only_len;
1083 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1084 sg->length = mbuf->data_len - sym->aead.data.offset;
1085 sg->offset = sym->aead.data.offset;
1087 /* Successive segs */
1092 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1093 sg->length = mbuf->data_len;
1097 if (is_decode(ses)) {
1100 memcpy(ctx->digest, sym->aead.digest.data,
1101 ses->digest_length);
1102 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1103 sg->length = ses->digest_length;
1111 static inline struct dpaa_sec_job *
1112 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1114 struct rte_crypto_sym_op *sym = op->sym;
1115 struct dpaa_sec_job *cf;
1116 struct dpaa_sec_op_ctx *ctx;
1117 struct qm_sg_entry *sg;
1118 uint32_t length = 0;
1119 rte_iova_t src_start_addr, dst_start_addr;
1120 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1123 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1126 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1128 dst_start_addr = src_start_addr;
1130 ctx = dpaa_sec_alloc_ctx(ses);
1138 rte_prefetch0(cf->sg);
1140 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1141 if (is_encode(ses)) {
1142 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1143 sg->length = ses->iv.length;
1144 length += sg->length;
1148 if (ses->auth_only_len) {
1149 qm_sg_entry_set64(sg,
1150 dpaa_mem_vtop(sym->aead.aad.data));
1151 sg->length = ses->auth_only_len;
1152 length += sg->length;
1156 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1157 sg->length = sym->aead.data.length;
1158 length += sg->length;
1162 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1163 sg->length = ses->iv.length;
1164 length += sg->length;
1168 if (ses->auth_only_len) {
1169 qm_sg_entry_set64(sg,
1170 dpaa_mem_vtop(sym->aead.aad.data));
1171 sg->length = ses->auth_only_len;
1172 length += sg->length;
1176 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1177 sg->length = sym->aead.data.length;
1178 length += sg->length;
1181 memcpy(ctx->digest, sym->aead.digest.data,
1182 ses->digest_length);
1185 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1186 sg->length = ses->digest_length;
1187 length += sg->length;
1191 /* input compound frame */
1192 cf->sg[1].length = length;
1193 cf->sg[1].extension = 1;
1194 cf->sg[1].final = 1;
1195 cpu_to_hw_sg(&cf->sg[1]);
1199 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1200 qm_sg_entry_set64(sg,
1201 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1202 sg->length = sym->aead.data.length + ses->auth_only_len;
1203 length = sg->length;
1204 if (is_encode(ses)) {
1206 /* set auth output */
1208 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1209 sg->length = ses->digest_length;
1210 length += sg->length;
1215 /* output compound frame */
1216 cf->sg[0].length = length;
1217 cf->sg[0].extension = 1;
1218 cpu_to_hw_sg(&cf->sg[0]);
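/*
 * For AEAD the input frame covers IV + optional AAD + payload (plus
 * the ICV when decrypting), while the output region starts
 * auth_only_len bytes before the cipher offset so the AAD bytes are
 * carried through unchanged.
 */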
1223 static inline struct dpaa_sec_job *
1224 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1226 struct rte_crypto_sym_op *sym = op->sym;
1227 struct dpaa_sec_job *cf;
1228 struct dpaa_sec_op_ctx *ctx;
1229 struct qm_sg_entry *sg, *out_sg, *in_sg;
1230 struct rte_mbuf *mbuf;
1232 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1237 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1238 } else {
1239 mbuf = sym->m_src;
1240 req_segs = mbuf->nb_segs * 2 + 4;
1243 if (req_segs > MAX_SG_ENTRIES) {
1244 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1249 ctx = dpaa_sec_alloc_ctx(ses);
1256 rte_prefetch0(cf->sg);
1259 out_sg = &cf->sg[0];
1260 out_sg->extension = 1;
1261 if (is_encode(ses))
1262 out_sg->length = sym->auth.data.length + ses->digest_length;
1263 else
1264 out_sg->length = sym->auth.data.length;
1266 /* output sg entries */
1268 qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1269 cpu_to_hw_sg(out_sg);
1272 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1273 sg->length = mbuf->data_len - sym->auth.data.offset;
1274 sg->offset = sym->auth.data.offset;
1276 /* Successive segs */
1281 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1282 sg->length = mbuf->data_len;
1285 sg->length -= ses->digest_length;
1287 if (is_encode(ses)) {
1289 /* set auth output */
1291 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1292 sg->length = ses->digest_length;
1300 in_sg->extension = 1;
1301 in_sg->final = 1;
1302 if (is_encode(ses))
1303 in_sg->length = ses->iv.length + sym->auth.data.length;
1304 else
1305 in_sg->length = ses->iv.length + sym->auth.data.length
1306 + ses->digest_length;
1308 /* input sg entries */
1310 qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1311 cpu_to_hw_sg(in_sg);
1314 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1315 sg->length = ses->iv.length;
1320 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1321 sg->length = mbuf->data_len - sym->auth.data.offset;
1322 sg->offset = sym->auth.data.offset;
1324 /* Successive segs */
1329 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1330 sg->length = mbuf->data_len;
1334 sg->length -= ses->digest_length;
1335 if (is_decode(ses)) {
1338 memcpy(ctx->digest, sym->auth.digest.data,
1339 ses->digest_length);
1340 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1341 sg->length = ses->digest_length;
1349 static inline struct dpaa_sec_job *
1350 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1352 struct rte_crypto_sym_op *sym = op->sym;
1353 struct dpaa_sec_job *cf;
1354 struct dpaa_sec_op_ctx *ctx;
1355 struct qm_sg_entry *sg;
1356 rte_iova_t src_start_addr, dst_start_addr;
1357 uint32_t length = 0;
1358 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1361 src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1363 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1365 dst_start_addr = src_start_addr;
1367 ctx = dpaa_sec_alloc_ctx(ses);
1375 rte_prefetch0(cf->sg);
1377 qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1378 if (is_encode(ses)) {
1379 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1380 sg->length = ses->iv.length;
1381 length += sg->length;
1385 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1386 sg->length = sym->auth.data.length;
1387 length += sg->length;
1391 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1392 sg->length = ses->iv.length;
1393 length += sg->length;
1398 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1399 sg->length = sym->auth.data.length;
1400 length += sg->length;
1403 memcpy(ctx->digest, sym->auth.digest.data,
1404 ses->digest_length);
1407 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1408 sg->length = ses->digest_length;
1409 length += sg->length;
1413 /* input compound frame */
1414 cf->sg[1].length = length;
1415 cf->sg[1].extension = 1;
1416 cf->sg[1].final = 1;
1417 cpu_to_hw_sg(&cf->sg[1]);
1421 qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1422 qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1423 sg->length = sym->cipher.data.length;
1424 length = sg->length;
1425 if (is_encode(ses)) {
1427 /* set auth output */
1429 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1430 sg->length = ses->digest_length;
1431 length += sg->length;
1436 /* output compound frame */
1437 cf->sg[0].length = length;
1438 cf->sg[0].extension = 1;
1439 cpu_to_hw_sg(&cf->sg[0]);
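/*
 * In the chained cipher+auth case the input list spans the full auth
 * range while the output spans only the cipher range; the difference
 * (auth_only_len) is supplied per packet through fd->cmd at enqueue
 * time rather than being baked into the descriptor.
 */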
1444 static inline struct dpaa_sec_job *
1445 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1447 struct rte_crypto_sym_op *sym = op->sym;
1448 struct dpaa_sec_job *cf;
1449 struct dpaa_sec_op_ctx *ctx;
1450 struct qm_sg_entry *sg;
1451 phys_addr_t src_start_addr, dst_start_addr;
1453 ctx = dpaa_sec_alloc_ctx(ses);
1459 src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1462 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1464 dst_start_addr = src_start_addr;
1468 qm_sg_entry_set64(sg, src_start_addr);
1469 sg->length = sym->m_src->pkt_len;
1473 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1476 qm_sg_entry_set64(sg, dst_start_addr);
1477 sg->length = sym->m_src->buf_len - sym->m_src->data_off;
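/*
 * Protocol (IPsec) offload hands SEC the whole packet: the output
 * entry is sized to the full remaining buffer since encapsulation
 * grows the frame, and on completion pkt_len is reset from the SG
 * length the hardware wrote back.
 */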
1484 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1487 /* Function to transmit the frames to given device and queuepair */
1489 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1490 uint16_t num_tx = 0;
1491 struct qm_fd fds[DPAA_SEC_BURST], *fd;
1492 uint32_t frames_to_send;
1493 struct rte_crypto_op *op;
1494 struct dpaa_sec_job *cf;
1495 dpaa_sec_session *ses;
1496 uint32_t auth_only_len;
1497 struct qman_fq *inq[DPAA_SEC_BURST];
1500 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1501 DPAA_SEC_BURST : nb_ops;
1502 for (loop = 0; loop < frames_to_send; loop++) {
1504 switch (op->sess_type) {
1505 case RTE_CRYPTO_OP_WITH_SESSION:
1506 ses = (dpaa_sec_session *)
1507 get_sym_session_private_data(
1509 cryptodev_driver_id);
1510 break;
1511 case RTE_CRYPTO_OP_SECURITY_SESSION:
1512 ses = (dpaa_sec_session *)
1513 get_sec_session_private_data(
1514 op->sym->sec_session);
1515 break;
1516 default:
1517 DPAA_SEC_DP_ERR(
1518 "sessionless crypto op not supported");
1519 frames_to_send = loop;
1523 if (unlikely(!ses->qp)) {
1524 if (dpaa_sec_attach_sess_q(qp, ses)) {
1525 frames_to_send = loop;
1529 } else if (unlikely(ses->qp != qp)) {
1530 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1531 " New qp = %p\n", ses->qp, qp);
1532 frames_to_send = loop;
1537 auth_only_len = op->sym->auth.data.length -
1538 op->sym->cipher.data.length;
1539 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1540 if (is_proto_ipsec(ses)) {
1541 cf = build_proto(op, ses);
1542 } else if (is_auth_only(ses)) {
1543 cf = build_auth_only(op, ses);
1544 } else if (is_cipher_only(ses)) {
1545 cf = build_cipher_only(op, ses);
1546 } else if (is_aead(ses)) {
1547 cf = build_cipher_auth_gcm(op, ses);
1548 auth_only_len = ses->auth_only_len;
1549 } else if (is_auth_cipher(ses)) {
1550 cf = build_cipher_auth(op, ses);
1552 DPAA_SEC_DP_ERR("not supported ops");
1553 frames_to_send = loop;
1558 if (is_auth_only(ses)) {
1559 cf = build_auth_only_sg(op, ses);
1560 } else if (is_cipher_only(ses)) {
1561 cf = build_cipher_only_sg(op, ses);
1562 } else if (is_aead(ses)) {
1563 cf = build_cipher_auth_gcm_sg(op, ses);
1564 auth_only_len = ses->auth_only_len;
1565 } else if (is_auth_cipher(ses)) {
1566 cf = build_cipher_auth_sg(op, ses);
1568 DPAA_SEC_DP_ERR("not supported ops");
1569 frames_to_send = loop;
1574 if (unlikely(!cf)) {
1575 frames_to_send = loop;
1581 inq[loop] = ses->inq;
1582 fd->opaque_addr = 0;
1584 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1585 fd->_format1 = qm_fd_compound;
1586 fd->length29 = 2 * sizeof(struct qm_sg_entry);
1587 /* auth_only_len is set as 0 in the descriptor and is
1588 * overwritten here in fd.cmd, which updates
1589 * the DPOVRD register.
1590 */
1591 if (auth_only_len)
1592 fd->cmd = 0x80000000 | auth_only_len;
1597 while (loop < frames_to_send) {
1598 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1599 frames_to_send - loop);
1601 nb_ops -= frames_to_send;
1602 num_tx += frames_to_send;
1605 dpaa_qp->tx_pkts += num_tx;
1606 dpaa_qp->tx_errs += nb_ops - num_tx;
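/*
 * Note: an op that cannot be mapped to a frame truncates the burst at
 * `loop`; frames already built are still submitted via
 * qman_enqueue_multi_fq(), which is retried until the whole burst has
 * been accepted by the portal.
 */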
1612 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1616 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1618 num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1620 dpaa_qp->rx_pkts += num_rx;
1621 dpaa_qp->rx_errs += nb_ops - num_rx;
1623 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1628 /** Release queue pair */
1630 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1633 struct dpaa_sec_dev_private *internals;
1634 struct dpaa_sec_qp *qp = NULL;
1636 PMD_INIT_FUNC_TRACE();
1638 DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1640 internals = dev->data->dev_private;
1641 if (qp_id >= internals->max_nb_queue_pairs) {
1642 DPAA_SEC_ERR("Max supported qpid %d",
1643 internals->max_nb_queue_pairs);
1647 qp = &internals->qps[qp_id];
1648 qp->internals = NULL;
1649 dev->data->queue_pairs[qp_id] = NULL;
1654 /** Setup a queue pair */
1656 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1657 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1658 __rte_unused int socket_id,
1659 __rte_unused struct rte_mempool *session_pool)
1661 struct dpaa_sec_dev_private *internals;
1662 struct dpaa_sec_qp *qp = NULL;
1664 DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1666 internals = dev->data->dev_private;
1667 if (qp_id >= internals->max_nb_queue_pairs) {
1668 DPAA_SEC_ERR("Max supported qpid %d",
1669 internals->max_nb_queue_pairs);
1673 qp = &internals->qps[qp_id];
1674 qp->internals = internals;
1675 dev->data->queue_pairs[qp_id] = qp;
1680 /** Return the number of allocated queue pairs */
1682 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1684 PMD_INIT_FUNC_TRACE();
1686 return dev->data->nb_queue_pairs;
1689 /** Returns the size of session structure */
1691 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1693 PMD_INIT_FUNC_TRACE();
1695 return sizeof(dpaa_sec_session);
1699 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1700 struct rte_crypto_sym_xform *xform,
1701 dpaa_sec_session *session)
1703 session->cipher_alg = xform->cipher.algo;
1704 session->iv.length = xform->cipher.iv.length;
1705 session->iv.offset = xform->cipher.iv.offset;
1706 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1707 RTE_CACHE_LINE_SIZE);
1708 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1709 DPAA_SEC_ERR("No Memory for cipher key");
1712 session->cipher_key.length = xform->cipher.key.length;
1714 memcpy(session->cipher_key.data, xform->cipher.key.data,
1715 xform->cipher.key.length);
1716 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1723 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1724 struct rte_crypto_sym_xform *xform,
1725 dpaa_sec_session *session)
1727 session->auth_alg = xform->auth.algo;
1728 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1729 RTE_CACHE_LINE_SIZE);
1730 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1731 DPAA_SEC_ERR("No Memory for auth key");
1734 session->auth_key.length = xform->auth.key.length;
1735 session->digest_length = xform->auth.digest_length;
1737 memcpy(session->auth_key.data, xform->auth.key.data,
1738 xform->auth.key.length);
1739 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1746 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1747 struct rte_crypto_sym_xform *xform,
1748 dpaa_sec_session *session)
1750 session->aead_alg = xform->aead.algo;
1751 session->iv.length = xform->aead.iv.length;
1752 session->iv.offset = xform->aead.iv.offset;
1753 session->auth_only_len = xform->aead.aad_length;
1754 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1755 RTE_CACHE_LINE_SIZE);
1756 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1757 DPAA_SEC_ERR("No Memory for aead key\n");
1760 session->aead_key.length = xform->aead.key.length;
1761 session->digest_length = xform->aead.digest_length;
1763 memcpy(session->aead_key.data, xform->aead.key.data,
1764 xform->aead.key.length);
1765 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1771 static struct qman_fq *
1772 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1776 for (i = 0; i < qi->max_nb_sessions; i++) {
1777 if (qi->inq_attach[i] == 0) {
1778 qi->inq_attach[i] = 1;
1779 return &qi->inq[i];
1780 }
1781 }
1782 DPAA_SEC_WARN("All %x sessions in use", qi->max_nb_sessions);
1788 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1792 for (i = 0; i < qi->max_nb_sessions; i++) {
1793 if (&qi->inq[i] == fq) {
1794 qman_retire_fq(fq, NULL);
1795 qman_oos_fq(fq);
1796 qi->inq_attach[i] = 0;
1804 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1809 ret = dpaa_sec_prep_cdb(sess);
1811 DPAA_SEC_ERR("Unable to prepare sec cdb");
1814 if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1815 ret = rte_dpaa_portal_init((void *)0);
1817 DPAA_SEC_ERR("Failure in affining portal");
1821 ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1822 qman_fq_fqid(&qp->outq));
1824 DPAA_SEC_ERR("Unable to init sec queue");
1830 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1831 struct rte_crypto_sym_xform *xform, void *sess)
1833 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1834 dpaa_sec_session *session = sess;
1836 PMD_INIT_FUNC_TRACE();
1838 if (unlikely(sess == NULL)) {
1839 DPAA_SEC_ERR("invalid session struct");
1842 memset(session, 0, sizeof(dpaa_sec_session));
1844 /* Default IV length = 0 */
1845 session->iv.length = 0;
1848 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1849 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1850 dpaa_sec_cipher_init(dev, xform, session);
1852 /* Authentication Only */
1853 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1854 xform->next == NULL) {
1855 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1856 dpaa_sec_auth_init(dev, xform, session);
1858 /* Cipher then Authenticate */
1859 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1860 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1861 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1862 dpaa_sec_cipher_init(dev, xform, session);
1863 dpaa_sec_auth_init(dev, xform->next, session);
1865 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1869 /* Authenticate then Cipher */
1870 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1871 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1872 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1873 dpaa_sec_auth_init(dev, xform, session);
1874 dpaa_sec_cipher_init(dev, xform->next, session);
1876 DPAA_SEC_ERR("Not supported: Auth then Cipher");
1880 /* AEAD operation for AES-GCM kind of Algorithms */
1881 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1882 xform->next == NULL) {
1883 dpaa_sec_aead_init(dev, xform, session);
1886 DPAA_SEC_ERR("Invalid crypto type");
1889 session->ctx_pool = internals->ctx_pool;
1890 rte_spinlock_lock(&internals->lock);
1891 session->inq = dpaa_sec_attach_rxq(internals);
1892 rte_spinlock_unlock(&internals->lock);
1893 if (session->inq == NULL) {
1894 DPAA_SEC_ERR("unable to attach sec queue");
1901 rte_free(session->cipher_key.data);
1902 rte_free(session->auth_key.data);
1903 memset(session, 0, sizeof(dpaa_sec_session));
1909 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
1910 struct rte_crypto_sym_xform *xform,
1911 struct rte_cryptodev_sym_session *sess,
1912 struct rte_mempool *mempool)
1914 void *sess_private_data;
1917 PMD_INIT_FUNC_TRACE();
1919 if (rte_mempool_get(mempool, &sess_private_data)) {
1920 DPAA_SEC_ERR("Couldn't get object from session mempool");
1924 ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1926 DPAA_SEC_ERR("failed to configure session parameters");
1928 /* Return session to mempool */
1929 rte_mempool_put(mempool, sess_private_data);
1933 set_sym_session_private_data(sess, dev->driver_id,
1940 /** Clear the memory of session so it doesn't leave key material behind */
1942 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
1943 struct rte_cryptodev_sym_session *sess)
1945 struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1946 uint8_t index = dev->driver_id;
1947 void *sess_priv = get_sym_session_private_data(sess, index);
1949 PMD_INIT_FUNC_TRACE();
1951 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1954 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1957 dpaa_sec_detach_rxq(qi, s->inq);
1958 rte_free(s->cipher_key.data);
1959 rte_free(s->auth_key.data);
1960 memset(s, 0, sizeof(dpaa_sec_session));
1961 set_sym_session_private_data(sess, index, NULL);
1962 rte_mempool_put(sess_mp, sess_priv);
1967 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1968 struct rte_security_session_conf *conf,
1971 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1972 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1973 struct rte_crypto_auth_xform *auth_xform = NULL;
1974 struct rte_crypto_cipher_xform *cipher_xform = NULL;
1975 dpaa_sec_session *session = (dpaa_sec_session *)sess;
1977 PMD_INIT_FUNC_TRACE();
1979 memset(session, 0, sizeof(dpaa_sec_session));
1980 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1981 cipher_xform = &conf->crypto_xform->cipher;
1982 if (conf->crypto_xform->next)
1983 auth_xform = &conf->crypto_xform->next->auth;
1985 auth_xform = &conf->crypto_xform->auth;
1986 if (conf->crypto_xform->next)
1987 cipher_xform = &conf->crypto_xform->next->cipher;
1989 session->proto_alg = conf->protocol;
1991 if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
1992 session->cipher_key.data = rte_zmalloc(NULL,
1993 cipher_xform->key.length,
1994 RTE_CACHE_LINE_SIZE);
1995 if (session->cipher_key.data == NULL &&
1996 cipher_xform->key.length > 0) {
1997 DPAA_SEC_ERR("No Memory for cipher key");
2000 memcpy(session->cipher_key.data, cipher_xform->key.data,
2001 cipher_xform->key.length);
2002 session->cipher_key.length = cipher_xform->key.length;
2004 switch (cipher_xform->algo) {
2005 case RTE_CRYPTO_CIPHER_AES_CBC:
2006 case RTE_CRYPTO_CIPHER_3DES_CBC:
2007 case RTE_CRYPTO_CIPHER_AES_CTR:
2010 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2011 cipher_xform->algo);
2014 session->cipher_alg = cipher_xform->algo;
2016 session->cipher_key.data = NULL;
2017 session->cipher_key.length = 0;
2018 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2021 if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2022 session->auth_key.data = rte_zmalloc(NULL,
2023 auth_xform->key.length,
2024 RTE_CACHE_LINE_SIZE);
2025 if (session->auth_key.data == NULL &&
2026 auth_xform->key.length > 0) {
2027 DPAA_SEC_ERR("No Memory for auth key");
2028 rte_free(session->cipher_key.data);
2031 memcpy(session->auth_key.data, auth_xform->key.data,
2032 auth_xform->key.length);
2033 session->auth_key.length = auth_xform->key.length;
2035 switch (auth_xform->algo) {
2036 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2037 case RTE_CRYPTO_AUTH_MD5_HMAC:
2038 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2039 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2040 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2041 case RTE_CRYPTO_AUTH_AES_CMAC:
2044 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2048 session->auth_alg = auth_xform->algo;
2050 session->auth_key.data = NULL;
2051 session->auth_key.length = 0;
2052 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2055 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2056 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2057 sizeof(session->ip4_hdr));
2058 session->ip4_hdr.ip_v = IPVERSION;
2059 session->ip4_hdr.ip_hl = 5;
2060 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2061 sizeof(session->ip4_hdr));
2062 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2063 session->ip4_hdr.ip_id = 0;
2064 session->ip4_hdr.ip_off = 0;
2065 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2066 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2067 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2069 session->ip4_hdr.ip_sum = 0;
2070 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2071 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2072 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2073 (void *)&session->ip4_hdr,
2076 session->encap_pdb.options =
2077 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2078 PDBOPTS_ESP_OIHI_PDB_INL |
2080 PDBHMO_ESP_ENCAP_DTTL |
2082 session->encap_pdb.spi = ipsec_xform->spi;
2083 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2085 session->dir = DIR_ENC;
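/*
 * Note: the prebuilt outer IPv4 header is inlined into the encap PDB
 * (PDBOPTS_ESP_OIHI_PDB_INL), so SEC can prepend it during ESP
 * encapsulation without per-packet software work.
 */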
2086 } else if (ipsec_xform->direction ==
2087 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2088 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2089 session->decap_pdb.options = sizeof(struct ip) << 16;
2090 session->dir = DIR_DEC;
2093 session->ctx_pool = internals->ctx_pool;
2094 rte_spinlock_lock(&internals->lock);
2095 session->inq = dpaa_sec_attach_rxq(internals);
2096 rte_spinlock_unlock(&internals->lock);
2097 if (session->inq == NULL) {
2098 DPAA_SEC_ERR("unable to attach sec queue");
2105 rte_free(session->auth_key.data);
2106 rte_free(session->cipher_key.data);
2107 memset(session, 0, sizeof(dpaa_sec_session));
2112 dpaa_sec_security_session_create(void *dev,
2113 struct rte_security_session_conf *conf,
2114 struct rte_security_session *sess,
2115 struct rte_mempool *mempool)
2117 void *sess_private_data;
2118 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2121 if (rte_mempool_get(mempool, &sess_private_data)) {
2122 DPAA_SEC_ERR("Couldn't get object from session mempool");
2126 switch (conf->protocol) {
2127 case RTE_SECURITY_PROTOCOL_IPSEC:
2128 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2131 case RTE_SECURITY_PROTOCOL_MACSEC:
2132 return -ENOTSUP;
2133 default:
2134 return -EINVAL;
2135 }
2136 if (ret != 0) {
2137 DPAA_SEC_ERR("failed to configure session parameters");
2138 /* Return session to mempool */
2139 rte_mempool_put(mempool, sess_private_data);
2143 set_sec_session_private_data(sess, sess_private_data);
2148 /** Clear the memory of session so it doesn't leave key material behind */
2150 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2151 struct rte_security_session *sess)
2153 PMD_INIT_FUNC_TRACE();
2154 void *sess_priv = get_sec_session_private_data(sess);
2156 dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2159 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2161 rte_free(s->cipher_key.data);
2162 rte_free(s->auth_key.data);
2163 memset(s, 0, sizeof(dpaa_sec_session));
2164 set_sec_session_private_data(sess, NULL);
2165 rte_mempool_put(sess_mp, sess_priv);
2172 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2173 struct rte_cryptodev_config *config __rte_unused)
2177 struct dpaa_sec_dev_private *internals;
2179 PMD_INIT_FUNC_TRACE();
2181 internals = dev->data->dev_private;
2182 sprintf(str, "ctx_pool_%d", dev->data->dev_id);
2183 if (!internals->ctx_pool) {
2184 internals->ctx_pool = rte_mempool_create((const char *)str,
2187 CTX_POOL_CACHE_SIZE, 0,
2188 NULL, NULL, NULL, NULL,
2190 if (!internals->ctx_pool) {
2191 DPAA_SEC_ERR("%s create failed\n", str);
2195 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2202 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2204 PMD_INIT_FUNC_TRACE();
2209 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2211 PMD_INIT_FUNC_TRACE();
2215 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2217 struct dpaa_sec_dev_private *internals;
2219 PMD_INIT_FUNC_TRACE();
2224 internals = dev->data->dev_private;
2225 rte_mempool_free(internals->ctx_pool);
2226 internals->ctx_pool = NULL;
2232 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2233 struct rte_cryptodev_info *info)
2235 struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2237 PMD_INIT_FUNC_TRACE();
2239 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2240 info->feature_flags = dev->feature_flags;
2241 info->capabilities = dpaa_sec_capabilities;
2242 info->sym.max_nb_sessions = internals->max_nb_sessions;
2243 info->driver_id = cryptodev_driver_id;
2247 static struct rte_cryptodev_ops crypto_ops = {
2248 .dev_configure = dpaa_sec_dev_configure,
2249 .dev_start = dpaa_sec_dev_start,
2250 .dev_stop = dpaa_sec_dev_stop,
2251 .dev_close = dpaa_sec_dev_close,
2252 .dev_infos_get = dpaa_sec_dev_infos_get,
2253 .queue_pair_setup = dpaa_sec_queue_pair_setup,
2254 .queue_pair_release = dpaa_sec_queue_pair_release,
2255 .queue_pair_count = dpaa_sec_queue_pair_count,
2256 .sym_session_get_size = dpaa_sec_sym_session_get_size,
2257 .sym_session_configure = dpaa_sec_sym_session_configure,
2258 .sym_session_clear = dpaa_sec_sym_session_clear
2261 static const struct rte_security_capability *
2262 dpaa_sec_capabilities_get(void *device __rte_unused)
2264 return dpaa_sec_security_cap;
2267 struct rte_security_ops dpaa_sec_security_ops = {
2268 .session_create = dpaa_sec_security_session_create,
2269 .session_update = NULL,
2270 .session_stats_get = NULL,
2271 .session_destroy = dpaa_sec_security_session_destroy,
2272 .set_pkt_metadata = NULL,
2273 .capabilities_get = dpaa_sec_capabilities_get
2277 dpaa_sec_uninit(struct rte_cryptodev *dev)
2279 struct dpaa_sec_dev_private *internals;
2284 internals = dev->data->dev_private;
2285 rte_free(dev->security_ctx);
2287 /* In case close has been called, internals->ctx_pool would be NULL */
2288 rte_mempool_free(internals->ctx_pool);
2289 rte_free(internals);
2291 DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2292 dev->data->name, rte_socket_id());
2298 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2300 struct dpaa_sec_dev_private *internals;
2301 struct rte_security_ctx *security_instance;
2302 struct dpaa_sec_qp *qp;
2306 PMD_INIT_FUNC_TRACE();
2308 cryptodev->driver_id = cryptodev_driver_id;
2309 cryptodev->dev_ops = &crypto_ops;
2311 cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2312 cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2313 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2314 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2315 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2316 RTE_CRYPTODEV_FF_SECURITY |
2317 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2318 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2319 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2320 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2321 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2323 internals = cryptodev->data->dev_private;
2324 internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2325 internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2328 * For secondary processes, we don't initialise any further as primary
2329 * has already done this work. Only check we don't need a different
2330 * RX function.
2331 */
2332 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2333 DPAA_SEC_WARN("Device already init by primary process");
2337 /* Initialize security_ctx only for primary process*/
2338 security_instance = rte_malloc("rte_security_instances_ops",
2339 sizeof(struct rte_security_ctx), 0);
2340 if (security_instance == NULL)
2342 security_instance->device = (void *)cryptodev;
2343 security_instance->ops = &dpaa_sec_security_ops;
2344 security_instance->sess_cnt = 0;
2345 cryptodev->security_ctx = security_instance;
2347 rte_spinlock_init(&internals->lock);
2348 for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2349 /* init qman fq for queue pair */
2350 qp = &internals->qps[i];
2351 ret = dpaa_sec_init_tx(&qp->outq);
2353 DPAA_SEC_ERR("config tx of queue pair %d", i);
2358 flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2359 QMAN_FQ_FLAG_TO_DCPORTAL;
2360 for (i = 0; i < internals->max_nb_sessions; i++) {
2361 /* create rx qman fq for sessions*/
2362 ret = qman_create_fq(0, flags, &internals->inq[i]);
2363 if (unlikely(ret != 0)) {
2364 DPAA_SEC_ERR("sec qman_create_fq failed");
2369 RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2373 DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2375 dpaa_sec_uninit(cryptodev);
2380 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2381 struct rte_dpaa_device *dpaa_dev)
2383 struct rte_cryptodev *cryptodev;
2384 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2388 sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
2390 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2391 if (cryptodev == NULL)
2394 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2395 cryptodev->data->dev_private = rte_zmalloc_socket(
2396 "cryptodev private structure",
2397 sizeof(struct dpaa_sec_dev_private),
2398 RTE_CACHE_LINE_SIZE,
2401 if (cryptodev->data->dev_private == NULL)
2402 rte_panic("Cannot allocate memzone for private "
2403 "device data");
2406 dpaa_dev->crypto_dev = cryptodev;
2407 cryptodev->device = &dpaa_dev->device;
2409 /* init user callbacks */
2410 TAILQ_INIT(&(cryptodev->link_intr_cbs));
2412 /* if sec device version is not configured */
2413 if (!rta_get_sec_era()) {
2414 const struct device_node *caam_node;
2416 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2417 const uint32_t *prop = of_get_property(caam_node,
2418 "fsl,sec-era",
2419 NULL);
2420 if (prop) {
2421 rta_set_sec_era(
2422 INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
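/*
 * Note: the SEC era identifies the CAAM hardware revision; RTA uses
 * it to emit only descriptor opcodes that revision supports, so it
 * must be known before any shared descriptor is built.
 */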
2428 /* Invoke PMD device initialization function */
2429 retval = dpaa_sec_dev_init(cryptodev);
2433 /* In case of error, cleanup is done */
2434 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2435 rte_free(cryptodev->data->dev_private);
2437 rte_cryptodev_pmd_release_device(cryptodev);
2443 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2445 struct rte_cryptodev *cryptodev;
2448 cryptodev = dpaa_dev->crypto_dev;
2449 if (cryptodev == NULL)
2452 ret = dpaa_sec_uninit(cryptodev);
2456 return rte_cryptodev_pmd_destroy(cryptodev);
2459 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2460 .drv_type = FSL_DPAA_CRYPTO,
2461 .driver = {
2462 .name = "DPAA SEC PMD"
2463 },
2464 .probe = cryptodev_dpaa_sec_probe,
2465 .remove = cryptodev_dpaa_sec_remove,
2466 };
2468 static struct cryptodev_driver dpaa_sec_crypto_drv;
2470 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2471 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2472 cryptodev_driver_id);
2474 RTE_INIT(dpaa_sec_init_log)
2476 dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2477 if (dpaa_logtype_sec >= 0)
2478 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);