/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>
enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}

	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each,
	 * i.e. 256 bytes in total. One call to dcbz_64() clears one 64-byte
	 * cache line, hence it is called four times to clear all the SG
	 * entries. Since dpaa_sec_alloc_ctx() is called for each packet,
	 * memset() would be costlier than dcbz_64().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}
/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}
	return NULL;
}
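/*
 * Illustrative sketch (comment only, not driver code): dpaa_mem_vtop() has
 * to walk the memseg table on every call, while dpaa_mem_vtop_ctx() relies
 * on a mempool object being IOVA-contiguous, so a single offset captured at
 * allocation time converts any address inside the ctx:
 *
 *	ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);
 *	iova = (uint64_t)&ctx->job.sg[2] - ctx->vtop_offset;
 */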
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* Initialize the queue with dest chan as caam chan so that
 * all the packets in this queue could be dispatched into caam
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	unsigned int flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}
/* frames are enqueued on the in_fq and CAAM puts the crypto result on the out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx,
	 * sg[0] is for output,
	 * sg[1] is for input
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}
/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}
static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype =
			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}
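/*
 * Illustrative example (comment only): for an IPsec session using SHA1-HMAC,
 * caam_auth_alg() above would fill
 *
 *	alginfo_a->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
 *	alginfo_a->algmode = OP_ALG_AAI_HMAC;
 *
 * whereas a plain crypto session gets OP_ALG_ALGSEL_SHA1 instead.
 */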
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "not supported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1 << 1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
						(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		if (is_proto_ipsec(ses)) {
			if (ses->dir == DIR_ENC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
						cdb->sh_desc,
						true, swap, &ses->encap_pdb,
						(uint8_t *)&ses->ip4_hdr,
						&alginfo_c, &alginfo_a);
			} else if (ses->dir == DIR_DEC) {
				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
						cdb->sh_desc,
						true, swap, &ses->decap_pdb,
						&alginfo_c, &alginfo_a);
			}
		} else {
			/* Auth_only_len is set as 0 here and it will be
			 * overwritten in fd for each packet.
			 */
			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
					true, swap, &alginfo_c, &alginfo_a,
					ses->iv.length, 0,
					ses->digest_length, ses->dir);
		}
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}
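/*
 * Illustrative note (comment only): rta_inline_query() above reports in
 * cdb->sh_desc[2] which keys still fit inside the shared descriptor:
 * bit 0 set means the cipher key may stay immediate (RTA_DATA_IMM), bit 1
 * the auth key; a cleared bit forces the corresponding key to be referenced
 * by physical pointer (RTA_DATA_PTR) instead.
 */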
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}
/*
 * packet looks like:
 *	|<----data_len------->|
 *	|ip_header|ah_header|icv|payload|
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest, save digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let's check digest by hw */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	phys_addr_t src_start_addr, dst_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* input */
	sg = &cf->sg[1];
	qm_sg_entry_set64(sg, src_start_addr);
	sg->length = sym->m_src->pkt_len;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr);
	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
	cpu_to_hw_sg(sg);

	return cf;
}
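/*
 * Illustrative note (comment only): each build_*() helper above returns a
 * dpaa_sec_job whose sg[0]/sg[1] pair forms the compound frame handed to
 * CAAM; sg[0] describes the output, sg[1] the input, and either side may be
 * an extension SG list rooted at sg[2] when several fragments (IV, AAD,
 * payload, digest) must be chained.
 */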
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		ses = (dpaa_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		ses = (dpaa_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -ENOTSUP;

	if (unlikely(!ses->qp || ses->qp != qp)) {
		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
		if (dpaa_sec_attach_sess_q(qp, ses))
			return -1;
	}

	/*
	 * Segmented buffer is not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else if (is_proto_ipsec(ses)) {
		cf = build_proto(op, ses);
	} else {
		PMD_TX_LOG(ERR, "not supported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* Auth_only_len is set as 0 in descriptor and it is overwritten
	 * here in the fd.cmd which will update the DPOVRD reg.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;

	ret = qman_enqueue(ses->inq, &fd, 0);

	return ret;
}
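/*
 * Worked example (comment only) for the DPOVRD override in
 * dpaa_sec_enqueue_op(): with auth.data.length = 72 and
 * cipher.data.length = 56, auth_only_len = 16 and fd.cmd becomes
 * 0x80000000 | 16 = 0x80000010.
 */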
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet which is to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}
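/*
 * Illustrative usage sketch (application side, not driver code; dev_id,
 * qp_id and the ops array are assumed to be set up by the caller): the two
 * burst hooks above are reached through the generic cryptodev API, e.g.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);
 *	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, n);
 */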
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}
/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
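/*
 * Illustrative sketch (application side, values are examples only): an AEAD
 * xform that would land in dpaa_sec_aead_init() could look like
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 *
 * where key and iv_offset are owned by the application.
 */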
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All session queues in use %x", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qman_retire_fq(fq, NULL);
			qman_oos_fq(fq);
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}
static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;
	return 0;
}
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
			   struct rte_crypto_sym_xform *xform,
			   struct rte_cryptodev_sym_session *sess,
			   struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
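/*
 * Illustrative usage sketch (application side, not driver code): the
 * configure hook above is normally reached through the generic session API,
 * e.g.
 *
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, s, &xform, sess_mp);
 *
 * with sess_mp and xform supplied by the application.
 */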
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	session->proto_alg = conf->protocol;
	session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					     auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto out;
	}

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto out;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
				sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_v = IPVERSION;
		session->ip4_hdr.ip_hl = 5;
		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		session->ip4_hdr.ip_id = 0;
		session->ip4_hdr.ip_off = 0;
		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
				: ipsec_xform->proto;
		session->ip4_hdr.ip_sum = 0;
		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));

		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		session->encap_pdb.spi = ipsec_xform->spi;
		session->encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		session->decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
	} else
		goto out;
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto out;
	}

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return -1;
}
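/*
 * Illustrative sketch (application side, fields abbreviated): an IPsec
 * security session reaching dpaa_sec_set_ipsec_session() would be created
 * roughly as
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = { .spi = spi, ... tunnel and SA options ... },
 *		.crypto_xform = xform_chain,
 *	};
 *	struct rte_security_session *s =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 * where sec_ctx is obtained via rte_cryptodev_get_sec_ctx(dev_id).
 */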
static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		/* clear the private data itself, not the session handle */
		memset(s, 0, sizeof(dpaa_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa_sec_dev_configure,
	.dev_start = dpaa_sec_dev_start,
	.dev_stop = dpaa_sec_dev_stop,
	.dev_close = dpaa_sec_dev_close,
	.dev_infos_get = dpaa_sec_dev_infos_get,
	.queue_pair_setup = dpaa_sec_queue_pair_setup,
	.queue_pair_release = dpaa_sec_queue_pair_release,
	.queue_pair_start = dpaa_sec_queue_pair_start,
	.queue_pair_stop = dpaa_sec_queue_pair_stop,
	.queue_pair_count = dpaa_sec_queue_pair_count,
	.session_get_size = dpaa_sec_session_get_size,
	.session_configure = dpaa_sec_session_configure,
	.session_clear = dpaa_sec_session_clear,
	.qp_attach_session = dpaa_sec_qp_attach_sess,
	.qp_detach_session = dpaa_sec_qp_detach_sess,
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	if (dev == NULL)
		return -ENODEV;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct rte_security_ctx *security_instance;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
			 struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}
static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);