1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2020 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
40 #include <desc/algo.h>
42 /* A minimum job descriptor consists of a one-word job descriptor HEADER and
43 * a pointer to the shared descriptor
45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
46 #define FSL_VENDOR_ID 0x1957
47 #define FSL_DEVICE_ID 0x410
48 #define FSL_SUBSYSTEM_SEC 1
49 #define FSL_MC_DPSECI_DEVID 3
52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
53 #define FLE_POOL_NUM_BUFS 32000
54 #define FLE_POOL_BUF_SIZE 256
55 #define FLE_POOL_CACHE_SIZE 512
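/* Per-packet FLE memory for the scatter/gather cases: a pool-buffer-sized
 * area for the frame list entries plus one 32-byte SGE per mbuf segment.
 */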
56 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32))
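/* Data head-room adjustment applied to the output mbuf data offset for
 * protocol (IPsec) offload; see sec_simple_fd_to_mbuf().
 */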
57 #define SEC_FLC_DHR_OUTBOUND -114
58 #define SEC_FLC_DHR_INBOUND 0
60 static uint8_t cryptodev_driver_id;
62 #ifdef RTE_LIBRTE_SECURITY
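/* Build a compound FD with scatter/gather FLEs for protocol (IPsec/PDCP)
 * offload when the source or destination mbuf is segmented.
 */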
64 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
65 struct rte_crypto_op *op,
66 struct qbman_fd *fd, uint16_t bpid)
68 struct rte_crypto_sym_op *sym_op = op->sym;
69 struct ctxt_priv *priv = sess->ctxt;
70 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
71 struct sec_flow_context *flc;
72 struct rte_mbuf *mbuf;
73 uint32_t in_len = 0, out_len = 0;
80 /* first FLE entry used to store mbuf and session ctxt */
81 fle = (struct qbman_fle *)rte_malloc(NULL,
82 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
85 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
88 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
89 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
90 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
92 /* Save the shared descriptor */
93 flc = &priv->flc_desc[0].flc;
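/* Buffers from a hardware-backed mempool carry a valid bpid: stamp it on
 * the FD and FLEs so the buffers can be released back to the pool;
 * otherwise set the invalid-pool (IVP) bits so they are left to software.
 */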
99 if (likely(bpid < MAX_BPID)) {
100 DPAA2_SET_FD_BPID(fd, bpid);
101 DPAA2_SET_FLE_BPID(op_fle, bpid);
102 DPAA2_SET_FLE_BPID(ip_fle, bpid);
104 DPAA2_SET_FD_IVP(fd);
105 DPAA2_SET_FLE_IVP(op_fle);
106 DPAA2_SET_FLE_IVP(ip_fle);
109 /* Configure FD as a FRAME LIST */
110 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
111 DPAA2_SET_FD_COMPOUND_FMT(fd);
112 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
114 /* Configure Output FLE with Scatter/Gather Entry */
115 DPAA2_SET_FLE_SG_EXT(op_fle);
116 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
118 /* Configure Output SGE for Encap/Decap */
119 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
120 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
123 sge->length = mbuf->data_len;
124 out_len += sge->length;
127 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
128 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
130 /* use buf_len for the last buffer so that extra data can be added */
131 sge->length = mbuf->buf_len - mbuf->data_off;
132 out_len += sge->length;
134 DPAA2_SET_FLE_FIN(sge);
135 op_fle->length = out_len;
138 mbuf = sym_op->m_src;
140 /* Configure Input FLE with Scatter/Gather Entry */
141 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
142 DPAA2_SET_FLE_SG_EXT(ip_fle);
143 DPAA2_SET_FLE_FIN(ip_fle);
145 /* Configure input SGE for Encap/Decap */
146 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
147 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
148 sge->length = mbuf->data_len;
149 in_len += sge->length;
155 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
156 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
157 sge->length = mbuf->data_len;
158 in_len += sge->length;
161 ip_fle->length = in_len;
162 DPAA2_SET_FLE_FIN(sge);
164 /* In case of PDCP, the per-packet HFN is stored in
165 * the mbuf priv area after sym_op.
167 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
168 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
169 sess->pdcp.hfn_ovd_offset);
170 /* enable HFN override */
171 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
172 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
173 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
175 DPAA2_SET_FD_LEN(fd, ip_fle->length);
181 build_proto_compound_fd(dpaa2_sec_session *sess,
182 struct rte_crypto_op *op,
183 struct qbman_fd *fd, uint16_t bpid)
185 struct rte_crypto_sym_op *sym_op = op->sym;
186 struct ctxt_priv *priv = sess->ctxt;
187 struct qbman_fle *fle, *ip_fle, *op_fle;
188 struct sec_flow_context *flc;
189 struct rte_mbuf *src_mbuf = sym_op->m_src;
190 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
196 /* Save the shared descriptor */
197 flc = &priv->flc_desc[0].flc;
199 /* we are using the first FLE entry to store Mbuf */
200 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
202 DPAA2_SEC_DP_ERR("Memory alloc failed");
205 memset(fle, 0, FLE_POOL_BUF_SIZE);
206 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
207 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
212 if (likely(bpid < MAX_BPID)) {
213 DPAA2_SET_FD_BPID(fd, bpid);
214 DPAA2_SET_FLE_BPID(op_fle, bpid);
215 DPAA2_SET_FLE_BPID(ip_fle, bpid);
217 DPAA2_SET_FD_IVP(fd);
218 DPAA2_SET_FLE_IVP(op_fle);
219 DPAA2_SET_FLE_IVP(ip_fle);
222 /* Configure FD as a FRAME LIST */
223 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
224 DPAA2_SET_FD_COMPOUND_FMT(fd);
225 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
227 /* Configure Output FLE with dst mbuf data */
228 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
229 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
230 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
232 /* Configure Input FLE with src mbuf data */
233 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
234 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
235 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
237 DPAA2_SET_FD_LEN(fd, ip_fle->length);
238 DPAA2_SET_FLE_FIN(ip_fle);
240 /* In case of PDCP, the per-packet HFN is stored in
241 * the mbuf priv area after sym_op.
243 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
244 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
245 sess->pdcp.hfn_ovd_offset);
246 /* enable HFN override */
247 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
248 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
249 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
257 build_proto_fd(dpaa2_sec_session *sess,
258 struct rte_crypto_op *op,
259 struct qbman_fd *fd, uint16_t bpid)
261 struct rte_crypto_sym_op *sym_op = op->sym;
263 return build_proto_compound_fd(sess, op, fd, bpid);
265 struct ctxt_priv *priv = sess->ctxt;
266 struct sec_flow_context *flc;
267 struct rte_mbuf *mbuf = sym_op->m_src;
269 if (likely(bpid < MAX_BPID))
270 DPAA2_SET_FD_BPID(fd, bpid);
272 DPAA2_SET_FD_IVP(fd);
274 /* Save the shared descriptor */
275 flc = &priv->flc_desc[0].flc;
277 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
278 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
279 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
280 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
282 /* save the op pointer in buf_iova and park the real buffer IOVA in digest.phys_addr (restored in sec_simple_fd_to_mbuf()) */
283 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
284 mbuf->buf_iova = (size_t)op;
291 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
292 struct rte_crypto_op *op,
293 struct qbman_fd *fd, __rte_unused uint16_t bpid)
295 struct rte_crypto_sym_op *sym_op = op->sym;
296 struct ctxt_priv *priv = sess->ctxt;
297 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
298 struct sec_flow_context *flc;
299 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
300 int icv_len = sess->digest_length;
302 struct rte_mbuf *mbuf;
303 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
307 mbuf = sym_op->m_dst;
309 mbuf = sym_op->m_src;
311 /* first FLE entry used to store mbuf and session ctxt */
312 fle = (struct qbman_fle *)rte_malloc(NULL,
313 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
314 RTE_CACHE_LINE_SIZE);
315 if (unlikely(!fle)) {
316 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
319 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
320 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
321 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
327 /* Save the shared descriptor */
328 flc = &priv->flc_desc[0].flc;
330 /* Configure FD as a FRAME LIST */
331 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
332 DPAA2_SET_FD_COMPOUND_FMT(fd);
333 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
335 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
336 "iv-len=%d data_off: 0x%x\n",
337 sym_op->aead.data.offset,
338 sym_op->aead.data.length,
341 sym_op->m_src->data_off);
343 /* Configure Output FLE with Scatter/Gather Entry */
344 DPAA2_SET_FLE_SG_EXT(op_fle);
345 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
348 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
350 op_fle->length = (sess->dir == DIR_ENC) ?
351 (sym_op->aead.data.length + icv_len) :
352 sym_op->aead.data.length;
354 /* Configure Output SGE for Encap/Decap */
355 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
356 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
357 sge->length = mbuf->data_len - sym_op->aead.data.offset;
363 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
364 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
365 sge->length = mbuf->data_len;
368 sge->length -= icv_len;
370 if (sess->dir == DIR_ENC) {
372 DPAA2_SET_FLE_ADDR(sge,
373 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
374 sge->length = icv_len;
376 DPAA2_SET_FLE_FIN(sge);
379 mbuf = sym_op->m_src;
381 /* Configure Input FLE with Scatter/Gather Entry */
382 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
383 DPAA2_SET_FLE_SG_EXT(ip_fle);
384 DPAA2_SET_FLE_FIN(ip_fle);
385 ip_fle->length = (sess->dir == DIR_ENC) ?
386 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
387 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
390 /* Configure Input SGE for Encap/Decap */
391 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
392 sge->length = sess->iv.length;
396 DPAA2_SET_FLE_ADDR(sge,
397 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
398 sge->length = auth_only_len;
402 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
403 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
405 sge->length = mbuf->data_len - sym_op->aead.data.offset;
411 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
412 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
413 sge->length = mbuf->data_len;
417 if (sess->dir == DIR_DEC) {
419 old_icv = (uint8_t *)(sge + 1);
420 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
421 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
422 sge->length = icv_len;
425 DPAA2_SET_FLE_FIN(sge);
427 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
428 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
430 DPAA2_SET_FD_LEN(fd, ip_fle->length);
436 build_authenc_gcm_fd(dpaa2_sec_session *sess,
437 struct rte_crypto_op *op,
438 struct qbman_fd *fd, uint16_t bpid)
440 struct rte_crypto_sym_op *sym_op = op->sym;
441 struct ctxt_priv *priv = sess->ctxt;
442 struct qbman_fle *fle, *sge;
443 struct sec_flow_context *flc;
444 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
445 int icv_len = sess->digest_length, retval;
447 struct rte_mbuf *dst;
448 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
456 /* TODO: we are using the first FLE entry to store the mbuf and session ctxt.
457 * Currently we do not know which FLE has the mbuf stored,
458 * so while retrieving we can go back 1 FLE from the FD ADDR
459 * to get the mbuf address from the previous FLE.
460 * A better approach would be to use the inline mbuf
462 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
464 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
467 memset(fle, 0, FLE_POOL_BUF_SIZE);
468 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
469 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
472 if (likely(bpid < MAX_BPID)) {
473 DPAA2_SET_FD_BPID(fd, bpid);
474 DPAA2_SET_FLE_BPID(fle, bpid);
475 DPAA2_SET_FLE_BPID(fle + 1, bpid);
476 DPAA2_SET_FLE_BPID(sge, bpid);
477 DPAA2_SET_FLE_BPID(sge + 1, bpid);
478 DPAA2_SET_FLE_BPID(sge + 2, bpid);
479 DPAA2_SET_FLE_BPID(sge + 3, bpid);
481 DPAA2_SET_FD_IVP(fd);
482 DPAA2_SET_FLE_IVP(fle);
483 DPAA2_SET_FLE_IVP((fle + 1));
484 DPAA2_SET_FLE_IVP(sge);
485 DPAA2_SET_FLE_IVP((sge + 1));
486 DPAA2_SET_FLE_IVP((sge + 2));
487 DPAA2_SET_FLE_IVP((sge + 3));
490 /* Save the shared descriptor */
491 flc = &priv->flc_desc[0].flc;
492 /* Configure FD as a FRAME LIST */
493 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
494 DPAA2_SET_FD_COMPOUND_FMT(fd);
495 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
497 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
498 "iv-len=%d data_off: 0x%x\n",
499 sym_op->aead.data.offset,
500 sym_op->aead.data.length,
503 sym_op->m_src->data_off);
505 /* Configure Output FLE with Scatter/Gather Entry */
506 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
508 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
509 fle->length = (sess->dir == DIR_ENC) ?
510 (sym_op->aead.data.length + icv_len) :
511 sym_op->aead.data.length;
513 DPAA2_SET_FLE_SG_EXT(fle);
515 /* Configure Output SGE for Encap/Decap */
516 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
517 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
518 sge->length = sym_op->aead.data.length;
520 if (sess->dir == DIR_ENC) {
522 DPAA2_SET_FLE_ADDR(sge,
523 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
524 sge->length = sess->digest_length;
526 DPAA2_SET_FLE_FIN(sge);
531 /* Configure Input FLE with Scatter/Gather Entry */
532 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
533 DPAA2_SET_FLE_SG_EXT(fle);
534 DPAA2_SET_FLE_FIN(fle);
535 fle->length = (sess->dir == DIR_ENC) ?
536 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
537 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
538 sess->digest_length);
540 /* Configure Input SGE for Encap/Decap */
541 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
542 sge->length = sess->iv.length;
545 DPAA2_SET_FLE_ADDR(sge,
546 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
547 sge->length = auth_only_len;
548 DPAA2_SET_FLE_BPID(sge, bpid);
552 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
553 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
554 sym_op->m_src->data_off);
555 sge->length = sym_op->aead.data.length;
556 if (sess->dir == DIR_DEC) {
558 old_icv = (uint8_t *)(sge + 1);
559 memcpy(old_icv, sym_op->aead.digest.data,
560 sess->digest_length);
561 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
562 sge->length = sess->digest_length;
564 DPAA2_SET_FLE_FIN(sge);
567 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
568 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
571 DPAA2_SET_FD_LEN(fd, fle->length);
576 build_authenc_sg_fd(dpaa2_sec_session *sess,
577 struct rte_crypto_op *op,
578 struct qbman_fd *fd, __rte_unused uint16_t bpid)
580 struct rte_crypto_sym_op *sym_op = op->sym;
581 struct ctxt_priv *priv = sess->ctxt;
582 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
583 struct sec_flow_context *flc;
584 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
585 sym_op->auth.data.offset;
586 uint16_t auth_tail_len = sym_op->auth.data.length -
587 sym_op->cipher.data.length - auth_hdr_len;
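/* SEC expects the authenticate-only lengths packed into one word:
 * the tail length (after the cipher region) in the upper 16 bits and
 * the header length (before it) in the lower 16 bits.
 */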
588 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
589 int icv_len = sess->digest_length;
591 struct rte_mbuf *mbuf;
592 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
596 mbuf = sym_op->m_dst;
598 mbuf = sym_op->m_src;
600 /* first FLE entry used to store mbuf and session ctxt */
601 fle = (struct qbman_fle *)rte_malloc(NULL,
602 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
603 RTE_CACHE_LINE_SIZE);
604 if (unlikely(!fle)) {
605 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
608 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
609 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
610 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
616 /* Save the shared descriptor */
617 flc = &priv->flc_desc[0].flc;
619 /* Configure FD as a FRAME LIST */
620 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
621 DPAA2_SET_FD_COMPOUND_FMT(fd);
622 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
625 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
626 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
627 sym_op->auth.data.offset,
628 sym_op->auth.data.length,
630 sym_op->cipher.data.offset,
631 sym_op->cipher.data.length,
633 sym_op->m_src->data_off);
635 /* Configure Output FLE with Scatter/Gather Entry */
636 DPAA2_SET_FLE_SG_EXT(op_fle);
637 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
640 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
642 op_fle->length = (sess->dir == DIR_ENC) ?
643 (sym_op->cipher.data.length + icv_len) :
644 sym_op->cipher.data.length;
646 /* Configure Output SGE for Encap/Decap */
647 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
648 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
649 sge->length = mbuf->data_len - sym_op->auth.data.offset;
655 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
656 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
657 sge->length = mbuf->data_len;
660 sge->length -= icv_len;
662 if (sess->dir == DIR_ENC) {
664 DPAA2_SET_FLE_ADDR(sge,
665 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
666 sge->length = icv_len;
668 DPAA2_SET_FLE_FIN(sge);
671 mbuf = sym_op->m_src;
673 /* Configure Input FLE with Scatter/Gather Entry */
674 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
675 DPAA2_SET_FLE_SG_EXT(ip_fle);
676 DPAA2_SET_FLE_FIN(ip_fle);
677 ip_fle->length = (sess->dir == DIR_ENC) ?
678 (sym_op->auth.data.length + sess->iv.length) :
679 (sym_op->auth.data.length + sess->iv.length +
682 /* Configure Input SGE for Encap/Decap */
683 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
684 sge->length = sess->iv.length;
687 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
688 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
690 sge->length = mbuf->data_len - sym_op->auth.data.offset;
696 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
697 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
698 sge->length = mbuf->data_len;
701 sge->length -= icv_len;
703 if (sess->dir == DIR_DEC) {
705 old_icv = (uint8_t *)(sge + 1);
706 memcpy(old_icv, sym_op->auth.digest.data,
708 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
709 sge->length = icv_len;
712 DPAA2_SET_FLE_FIN(sge);
714 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
715 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
717 DPAA2_SET_FD_LEN(fd, ip_fle->length);
723 build_authenc_fd(dpaa2_sec_session *sess,
724 struct rte_crypto_op *op,
725 struct qbman_fd *fd, uint16_t bpid)
727 struct rte_crypto_sym_op *sym_op = op->sym;
728 struct ctxt_priv *priv = sess->ctxt;
729 struct qbman_fle *fle, *sge;
730 struct sec_flow_context *flc;
731 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
732 sym_op->auth.data.offset;
733 uint16_t auth_tail_len = sym_op->auth.data.length -
734 sym_op->cipher.data.length - auth_hdr_len;
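/* Pack the auth-only lengths: tail length in the upper 16 bits,
 * header length in the lower 16 bits.
 */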
735 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
737 int icv_len = sess->digest_length, retval;
739 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
741 struct rte_mbuf *dst;
748 /* We are using the first FLE entry to store the mbuf.
749 * Currently we do not know which FLE has the mbuf stored,
750 * so while retrieving we can go back 1 FLE from the FD ADDR
751 * to get the mbuf address from the previous FLE.
752 * A better approach would be to use the inline mbuf
754 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
756 DPAA2_SEC_ERR("Memory alloc failed for SGE");
759 memset(fle, 0, FLE_POOL_BUF_SIZE);
760 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
761 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
764 if (likely(bpid < MAX_BPID)) {
765 DPAA2_SET_FD_BPID(fd, bpid);
766 DPAA2_SET_FLE_BPID(fle, bpid);
767 DPAA2_SET_FLE_BPID(fle + 1, bpid);
768 DPAA2_SET_FLE_BPID(sge, bpid);
769 DPAA2_SET_FLE_BPID(sge + 1, bpid);
770 DPAA2_SET_FLE_BPID(sge + 2, bpid);
771 DPAA2_SET_FLE_BPID(sge + 3, bpid);
773 DPAA2_SET_FD_IVP(fd);
774 DPAA2_SET_FLE_IVP(fle);
775 DPAA2_SET_FLE_IVP((fle + 1));
776 DPAA2_SET_FLE_IVP(sge);
777 DPAA2_SET_FLE_IVP((sge + 1));
778 DPAA2_SET_FLE_IVP((sge + 2));
779 DPAA2_SET_FLE_IVP((sge + 3));
782 /* Save the shared descriptor */
783 flc = &priv->flc_desc[0].flc;
784 /* Configure FD as a FRAME LIST */
785 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
786 DPAA2_SET_FD_COMPOUND_FMT(fd);
787 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
790 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
791 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
792 sym_op->auth.data.offset,
793 sym_op->auth.data.length,
795 sym_op->cipher.data.offset,
796 sym_op->cipher.data.length,
798 sym_op->m_src->data_off);
800 /* Configure Output FLE with Scatter/Gather Entry */
801 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
803 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
804 fle->length = (sess->dir == DIR_ENC) ?
805 (sym_op->cipher.data.length + icv_len) :
806 sym_op->cipher.data.length;
808 DPAA2_SET_FLE_SG_EXT(fle);
810 /* Configure Output SGE for Encap/Decap */
811 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
812 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
814 sge->length = sym_op->cipher.data.length;
816 if (sess->dir == DIR_ENC) {
818 DPAA2_SET_FLE_ADDR(sge,
819 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
820 sge->length = sess->digest_length;
821 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
824 DPAA2_SET_FLE_FIN(sge);
829 /* Configure Input FLE with Scatter/Gather Entry */
830 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
831 DPAA2_SET_FLE_SG_EXT(fle);
832 DPAA2_SET_FLE_FIN(fle);
833 fle->length = (sess->dir == DIR_ENC) ?
834 (sym_op->auth.data.length + sess->iv.length) :
835 (sym_op->auth.data.length + sess->iv.length +
836 sess->digest_length);
838 /* Configure Input SGE for Encap/Decap */
839 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
840 sge->length = sess->iv.length;
843 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
844 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
845 sym_op->m_src->data_off);
846 sge->length = sym_op->auth.data.length;
847 if (sess->dir == DIR_DEC) {
849 old_icv = (uint8_t *)(sge + 1);
850 memcpy(old_icv, sym_op->auth.digest.data,
851 sess->digest_length);
852 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
853 sge->length = sess->digest_length;
854 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
855 sess->digest_length +
858 DPAA2_SET_FLE_FIN(sge);
860 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
861 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
866 static inline int build_auth_sg_fd(
867 dpaa2_sec_session *sess,
868 struct rte_crypto_op *op,
870 __rte_unused uint16_t bpid)
872 struct rte_crypto_sym_op *sym_op = op->sym;
873 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
874 struct sec_flow_context *flc;
875 struct ctxt_priv *priv = sess->ctxt;
876 int data_len, data_offset;
878 struct rte_mbuf *mbuf;
880 data_len = sym_op->auth.data.length;
881 data_offset = sym_op->auth.data.offset;
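/* For SNOW3G UIA2 and ZUC EIA3 the auth length/offset are given in bits;
 * they must be byte aligned and are converted to bytes for SEC.
 */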
883 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
884 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
885 if ((data_len & 7) || (data_offset & 7)) {
886 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
890 data_len = data_len >> 3;
891 data_offset = data_offset >> 3;
894 mbuf = sym_op->m_src;
895 fle = (struct qbman_fle *)rte_malloc(NULL,
896 FLE_SG_MEM_SIZE(mbuf->nb_segs),
897 RTE_CACHE_LINE_SIZE);
898 if (unlikely(!fle)) {
899 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
902 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
903 /* first FLE entry used to store mbuf and session ctxt */
904 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
905 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
910 flc = &priv->flc_desc[DESC_INITFINAL].flc;
912 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
913 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
914 DPAA2_SET_FD_COMPOUND_FMT(fd);
917 DPAA2_SET_FLE_ADDR(op_fle,
918 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
919 op_fle->length = sess->digest_length;
922 DPAA2_SET_FLE_SG_EXT(ip_fle);
923 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
924 ip_fle->length = data_len;
926 if (sess->iv.length) {
929 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
932 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
933 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
935 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
936 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
939 sge->length = sess->iv.length;
941 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
942 ip_fle->length += sge->length;
946 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
947 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
949 if (data_len <= (mbuf->data_len - data_offset)) {
950 sge->length = data_len;
953 sge->length = mbuf->data_len - data_offset;
955 /* remaining input segments */
956 while ((data_len = data_len - sge->length) &&
957 (mbuf = mbuf->next)) {
959 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
960 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
961 if (data_len > mbuf->data_len)
962 sge->length = mbuf->data_len;
964 sge->length = data_len;
968 if (sess->dir == DIR_DEC) {
969 /* Digest verification case */
971 old_digest = (uint8_t *)(sge + 1);
972 rte_memcpy(old_digest, sym_op->auth.digest.data,
973 sess->digest_length);
974 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
975 sge->length = sess->digest_length;
976 ip_fle->length += sess->digest_length;
978 DPAA2_SET_FLE_FIN(sge);
979 DPAA2_SET_FLE_FIN(ip_fle);
980 DPAA2_SET_FD_LEN(fd, ip_fle->length);
986 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
987 struct qbman_fd *fd, uint16_t bpid)
989 struct rte_crypto_sym_op *sym_op = op->sym;
990 struct qbman_fle *fle, *sge;
991 struct sec_flow_context *flc;
992 struct ctxt_priv *priv = sess->ctxt;
993 int data_len, data_offset;
997 data_len = sym_op->auth.data.length;
998 data_offset = sym_op->auth.data.offset;
1000 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1001 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1002 if ((data_len & 7) || (data_offset & 7)) {
1003 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1007 data_len = data_len >> 3;
1008 data_offset = data_offset >> 3;
1011 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1013 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1016 memset(fle, 0, FLE_POOL_BUF_SIZE);
1017 /* TODO: we are using the first FLE entry to store the mbuf.
1018 * Currently we do not know which FLE has the mbuf stored,
1019 * so while retrieving we can go back 1 FLE from the FD ADDR
1020 * to get the mbuf address from the previous FLE.
1021 * A better approach would be to use the inline mbuf
1023 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1024 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1028 if (likely(bpid < MAX_BPID)) {
1029 DPAA2_SET_FD_BPID(fd, bpid);
1030 DPAA2_SET_FLE_BPID(fle, bpid);
1031 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1032 DPAA2_SET_FLE_BPID(sge, bpid);
1033 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1035 DPAA2_SET_FD_IVP(fd);
1036 DPAA2_SET_FLE_IVP(fle);
1037 DPAA2_SET_FLE_IVP((fle + 1));
1038 DPAA2_SET_FLE_IVP(sge);
1039 DPAA2_SET_FLE_IVP((sge + 1));
1042 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1043 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1044 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1045 DPAA2_SET_FD_COMPOUND_FMT(fd);
1047 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1048 fle->length = sess->digest_length;
1051 /* Setting input FLE */
1052 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1053 DPAA2_SET_FLE_SG_EXT(fle);
1054 fle->length = data_len;
1056 if (sess->iv.length) {
1059 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1062 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1063 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1065 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1066 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1069 sge->length = sess->iv.length;
1072 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1073 fle->length = fle->length + sge->length;
1077 /* Setting data to authenticate */
1078 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1079 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1080 sge->length = data_len;
1082 if (sess->dir == DIR_DEC) {
1084 old_digest = (uint8_t *)(sge + 1);
1085 rte_memcpy(old_digest, sym_op->auth.digest.data,
1086 sess->digest_length);
1087 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1088 sge->length = sess->digest_length;
1089 fle->length = fle->length + sess->digest_length;
1092 DPAA2_SET_FLE_FIN(sge);
1093 DPAA2_SET_FLE_FIN(fle);
1094 DPAA2_SET_FD_LEN(fd, fle->length);
1100 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1101 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1103 struct rte_crypto_sym_op *sym_op = op->sym;
1104 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1105 int data_len, data_offset;
1106 struct sec_flow_context *flc;
1107 struct ctxt_priv *priv = sess->ctxt;
1108 struct rte_mbuf *mbuf;
1109 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1112 data_len = sym_op->cipher.data.length;
1113 data_offset = sym_op->cipher.data.offset;
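/* For SNOW3G UEA2 and ZUC EEA3 the cipher length/offset are given in bits;
 * they must be byte aligned and are converted to bytes for SEC.
 */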
1115 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1116 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1117 if ((data_len & 7) || (data_offset & 7)) {
1118 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1122 data_len = data_len >> 3;
1123 data_offset = data_offset >> 3;
1127 mbuf = sym_op->m_dst;
1129 mbuf = sym_op->m_src;
1131 /* first FLE entry used to store mbuf and session ctxt */
1132 fle = (struct qbman_fle *)rte_malloc(NULL,
1133 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1134 RTE_CACHE_LINE_SIZE);
1136 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1139 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1140 /* first FLE entry used to store mbuf and session ctxt */
1141 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1142 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1148 flc = &priv->flc_desc[0].flc;
1151 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1152 " data_off: 0x%x\n",
1156 sym_op->m_src->data_off);
1159 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1160 op_fle->length = data_len;
1161 DPAA2_SET_FLE_SG_EXT(op_fle);
1164 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1165 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1166 sge->length = mbuf->data_len - data_offset;
1172 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1173 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1174 sge->length = mbuf->data_len;
1177 DPAA2_SET_FLE_FIN(sge);
1180 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1181 flc, fle, fle->addr_hi, fle->addr_lo,
1185 mbuf = sym_op->m_src;
1187 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1188 ip_fle->length = sess->iv.length + data_len;
1189 DPAA2_SET_FLE_SG_EXT(ip_fle);
1192 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1193 DPAA2_SET_FLE_OFFSET(sge, 0);
1194 sge->length = sess->iv.length;
1199 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1200 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1201 sge->length = mbuf->data_len - data_offset;
1207 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1208 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1209 sge->length = mbuf->data_len;
1212 DPAA2_SET_FLE_FIN(sge);
1213 DPAA2_SET_FLE_FIN(ip_fle);
1216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1217 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1218 DPAA2_SET_FD_COMPOUND_FMT(fd);
1219 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1222 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1223 " off =%d, len =%d\n",
1224 DPAA2_GET_FD_ADDR(fd),
1225 DPAA2_GET_FD_BPID(fd),
1226 rte_dpaa2_bpid_info[bpid].meta_data_size,
1227 DPAA2_GET_FD_OFFSET(fd),
1228 DPAA2_GET_FD_LEN(fd));
1233 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1234 struct qbman_fd *fd, uint16_t bpid)
1236 struct rte_crypto_sym_op *sym_op = op->sym;
1237 struct qbman_fle *fle, *sge;
1238 int retval, data_len, data_offset;
1239 struct sec_flow_context *flc;
1240 struct ctxt_priv *priv = sess->ctxt;
1241 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1243 struct rte_mbuf *dst;
1245 data_len = sym_op->cipher.data.length;
1246 data_offset = sym_op->cipher.data.offset;
1248 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1249 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1250 if ((data_len & 7) || (data_offset & 7)) {
1251 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1255 data_len = data_len >> 3;
1256 data_offset = data_offset >> 3;
1260 dst = sym_op->m_dst;
1262 dst = sym_op->m_src;
1264 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1266 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1269 memset(fle, 0, FLE_POOL_BUF_SIZE);
1270 /* TODO: we are using the first FLE entry to store the mbuf.
1271 * Currently we do not know which FLE has the mbuf stored,
1272 * so while retrieving we can go back 1 FLE from the FD ADDR
1273 * to get the mbuf address from the previous FLE.
1274 * A better approach would be to use the inline mbuf
1276 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1277 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1281 if (likely(bpid < MAX_BPID)) {
1282 DPAA2_SET_FD_BPID(fd, bpid);
1283 DPAA2_SET_FLE_BPID(fle, bpid);
1284 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1285 DPAA2_SET_FLE_BPID(sge, bpid);
1286 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1288 DPAA2_SET_FD_IVP(fd);
1289 DPAA2_SET_FLE_IVP(fle);
1290 DPAA2_SET_FLE_IVP((fle + 1));
1291 DPAA2_SET_FLE_IVP(sge);
1292 DPAA2_SET_FLE_IVP((sge + 1));
1295 flc = &priv->flc_desc[0].flc;
1296 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1297 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1298 DPAA2_SET_FD_COMPOUND_FMT(fd);
1299 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1302 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1303 " data_off: 0x%x\n",
1307 sym_op->m_src->data_off);
1309 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1310 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1312 fle->length = data_len + sess->iv.length;
1315 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1316 flc, fle, fle->addr_hi, fle->addr_lo,
1321 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1322 fle->length = data_len + sess->iv.length;
1324 DPAA2_SET_FLE_SG_EXT(fle);
1326 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1327 sge->length = sess->iv.length;
1330 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1331 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1333 sge->length = data_len;
1334 DPAA2_SET_FLE_FIN(sge);
1335 DPAA2_SET_FLE_FIN(fle);
1338 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1339 " off =%d, len =%d\n",
1340 DPAA2_GET_FD_ADDR(fd),
1341 DPAA2_GET_FD_BPID(fd),
1342 rte_dpaa2_bpid_info[bpid].meta_data_size,
1343 DPAA2_GET_FD_OFFSET(fd),
1344 DPAA2_GET_FD_LEN(fd));
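/* Build the FD for a crypto op based on the session context type;
 * the scatter/gather variants are used when either mbuf is segmented.
 */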
1350 build_sec_fd(struct rte_crypto_op *op,
1351 struct qbman_fd *fd, uint16_t bpid)
1354 dpaa2_sec_session *sess;
1356 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1357 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1358 op->sym->session, cryptodev_driver_id);
1359 #ifdef RTE_LIBRTE_SECURITY
1360 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1361 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1362 op->sym->sec_session);
1370 /* Either the source or destination buffer is segmented */
1371 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1372 ((op->sym->m_dst != NULL) &&
1373 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1374 switch (sess->ctxt_type) {
1375 case DPAA2_SEC_CIPHER:
1376 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1378 case DPAA2_SEC_AUTH:
1379 ret = build_auth_sg_fd(sess, op, fd, bpid);
1381 case DPAA2_SEC_AEAD:
1382 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1384 case DPAA2_SEC_CIPHER_HASH:
1385 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1387 #ifdef RTE_LIBRTE_SECURITY
1388 case DPAA2_SEC_IPSEC:
1389 case DPAA2_SEC_PDCP:
1390 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1393 case DPAA2_SEC_HASH_CIPHER:
1395 DPAA2_SEC_ERR("error: Unsupported session");
1398 switch (sess->ctxt_type) {
1399 case DPAA2_SEC_CIPHER:
1400 ret = build_cipher_fd(sess, op, fd, bpid);
1402 case DPAA2_SEC_AUTH:
1403 ret = build_auth_fd(sess, op, fd, bpid);
1405 case DPAA2_SEC_AEAD:
1406 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1408 case DPAA2_SEC_CIPHER_HASH:
1409 ret = build_authenc_fd(sess, op, fd, bpid);
1411 #ifdef RTE_LIBRTE_SECURITY
1412 case DPAA2_SEC_IPSEC:
1413 ret = build_proto_fd(sess, op, fd, bpid);
1415 case DPAA2_SEC_PDCP:
1416 ret = build_proto_compound_fd(sess, op, fd, bpid);
1419 case DPAA2_SEC_HASH_CIPHER:
1421 DPAA2_SEC_ERR("error: Unsupported session");
1429 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1432 /* Function to transmit the frames to a given device and VQ */
1435 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1436 uint32_t frames_to_send, retry_count;
1437 struct qbman_eq_desc eqdesc;
1438 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1439 struct qbman_swp *swp;
1440 uint16_t num_tx = 0;
1441 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1442 /* TODO: need to support multiple buffer pools */
1444 struct rte_mempool *mb_pool;
1446 if (unlikely(nb_ops == 0))
1449 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1450 DPAA2_SEC_ERR("sessionless crypto op not supported");
1453 /* Prepare the enqueue descriptor */
1454 qbman_eq_desc_clear(&eqdesc);
1455 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1456 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1457 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1459 if (!DPAA2_PER_LCORE_DPIO) {
1460 ret = dpaa2_affine_qbman_swp();
1463 "Failed to allocate IO portal, tid: %d\n",
1468 swp = DPAA2_PER_LCORE_PORTAL;
1471 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1472 dpaa2_eqcr_size : nb_ops;
1474 for (loop = 0; loop < frames_to_send; loop++) {
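/* An mbuf dequeued from an atomic event queue carries a held DQRR
 * index in seqn; consume that entry via enqueue DCA and clear the
 * per-lcore bookkeeping.
 */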
1475 if ((*ops)->sym->m_src->seqn) {
1476 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1478 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1479 DPAA2_PER_LCORE_DQRR_SIZE--;
1480 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1481 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1484 /* Clear the unused FD fields before sending */
1485 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1486 mb_pool = (*ops)->sym->m_src->pool;
1487 bpid = mempool_to_bpid(mb_pool);
1488 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1490 DPAA2_SEC_ERR("error: Improper packet contents"
1491 " for crypto operation");
1499 while (loop < frames_to_send) {
1500 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1503 frames_to_send - loop);
1504 if (unlikely(ret < 0)) {
1506 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1521 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1522 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1526 #ifdef RTE_LIBRTE_SECURITY
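/* For protocol offload with a simple (non-compound) FD, the crypto op
 * pointer was stashed in the mbuf buf_iova at enqueue time; recover it,
 * restore the buffer IOVA and fix up the mbuf length and data offset.
 */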
1527 static inline struct rte_crypto_op *
1528 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1530 struct rte_crypto_op *op;
1531 uint16_t len = DPAA2_GET_FD_LEN(fd);
1533 dpaa2_sec_session *sess_priv __rte_unused;
1535 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1536 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1537 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1539 diff = len - mbuf->pkt_len;
1540 mbuf->pkt_len += diff;
1541 mbuf->data_len += diff;
1542 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1543 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1544 op->sym->aead.digest.phys_addr = 0L;
1546 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1547 op->sym->sec_session);
1548 if (sess_priv->dir == DIR_ENC)
1549 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1551 mbuf->data_off += SEC_FLC_DHR_INBOUND;
1557 static inline struct rte_crypto_op *
1558 sec_fd_to_mbuf(const struct qbman_fd *fd)
1560 struct qbman_fle *fle;
1561 struct rte_crypto_op *op;
1562 struct ctxt_priv *priv;
1563 struct rte_mbuf *dst, *src;
1565 #ifdef RTE_LIBRTE_SECURITY
1566 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1567 return sec_simple_fd_to_mbuf(fd);
1569 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1571 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1572 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1574 /* We are using the first FLE entry to store the mbuf.
1575 * Currently we do not know which FLE has the mbuf stored,
1576 * so while retrieving we can go back 1 FLE from the FD ADDR
1577 * to get the mbuf address from the previous FLE.
1578 * A better approach would be to use the inline mbuf
1581 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1582 /* TODO complete it. */
1583 DPAA2_SEC_ERR("error: non inline buffer");
1586 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1589 src = op->sym->m_src;
1592 if (op->sym->m_dst) {
1593 dst = op->sym->m_dst;
1598 #ifdef RTE_LIBRTE_SECURITY
1599 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1600 uint16_t len = DPAA2_GET_FD_LEN(fd);
1602 while (dst->next != NULL) {
1603 len -= dst->data_len;
1606 dst->data_len = len;
1609 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1610 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1613 DPAA2_GET_FD_ADDR(fd),
1614 DPAA2_GET_FD_BPID(fd),
1615 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1616 DPAA2_GET_FD_OFFSET(fd),
1617 DPAA2_GET_FD_LEN(fd));
1619 /* free the fle memory */
1620 if (likely(rte_pktmbuf_is_contiguous(src))) {
1621 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1622 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1624 rte_free((void *)(fle-1));
1630 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1633 /* Function to receive frames for a given device and VQ */
1634 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1635 struct qbman_result *dq_storage;
1636 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1637 int ret, num_rx = 0;
1638 uint8_t is_last = 0, status;
1639 struct qbman_swp *swp;
1640 const struct qbman_fd *fd;
1641 struct qbman_pull_desc pulldesc;
1643 if (!DPAA2_PER_LCORE_DPIO) {
1644 ret = dpaa2_affine_qbman_swp();
1647 "Failed to allocate IO portal, tid: %d\n",
1652 swp = DPAA2_PER_LCORE_PORTAL;
1653 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1655 qbman_pull_desc_clear(&pulldesc);
1656 qbman_pull_desc_set_numframes(&pulldesc,
1657 (nb_ops > dpaa2_dqrr_size) ?
1658 dpaa2_dqrr_size : nb_ops);
1659 qbman_pull_desc_set_fq(&pulldesc, fqid);
1660 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1661 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1664 /* Issue a volatile dequeue command. */
1666 if (qbman_swp_pull(swp, &pulldesc)) {
1668 "SEC VDQ command is not issued : QBMAN busy");
1669 /* Portal was busy, try again */
1675 /* Receive the packets till the Last Dequeue entry is found with
1676 * respect to the above issued PULL command.
1679 /* Check if the previously issued command is completed.
1680 * The SWP also seems to be shared between the Ethernet driver
1681 * and the SEC driver.
1683 while (!qbman_check_command_complete(dq_storage))
1686 /* Loop until the dq_storage is updated with
1687 * new token by QBMAN
1689 while (!qbman_check_new_result(dq_storage))
1691 /* Check whether the Last Pull command has expired and
1692 * set the condition for loop termination
1694 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1696 /* Check for valid frame. */
1697 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1699 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1700 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1705 fd = qbman_result_DQ_fd(dq_storage);
1706 ops[num_rx] = sec_fd_to_mbuf(fd);
1708 if (unlikely(fd->simple.frc)) {
1709 /* TODO Parse SEC errors */
1710 DPAA2_SEC_ERR("SEC returned Error - %x",
1712 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1714 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1719 } /* End of Packet Rx loop */
1721 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1723 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1724 /* Return the total number of packets received to the DPAA2 app */
1728 /** Release queue pair */
1730 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1732 struct dpaa2_sec_qp *qp =
1733 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1735 PMD_INIT_FUNC_TRACE();
1737 if (qp->rx_vq.q_storage) {
1738 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1739 rte_free(qp->rx_vq.q_storage);
1743 dev->data->queue_pairs[queue_pair_id] = NULL;
1748 /** Set up a queue pair */
1750 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1751 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1752 __rte_unused int socket_id)
1754 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1755 struct dpaa2_sec_qp *qp;
1756 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1757 struct dpseci_rx_queue_cfg cfg;
1760 PMD_INIT_FUNC_TRACE();
1762 /* If the qp is already in use, free the ring memory and qp metadata. */
1763 if (dev->data->queue_pairs[qp_id] != NULL) {
1764 DPAA2_SEC_INFO("QP already setup");
1768 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1769 dev, qp_id, qp_conf);
1771 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1773 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1774 RTE_CACHE_LINE_SIZE);
1776 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1780 qp->rx_vq.crypto_data = dev->data;
1781 qp->tx_vq.crypto_data = dev->data;
1782 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1783 sizeof(struct queue_storage_info_t),
1784 RTE_CACHE_LINE_SIZE);
1785 if (!qp->rx_vq.q_storage) {
1786 DPAA2_SEC_ERR("malloc failed for q_storage");
1789 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1791 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1792 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1796 dev->data->queue_pairs[qp_id] = qp;
1798 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1799 cfg.user_ctx = (size_t)(&qp->rx_vq);
1800 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1805 /** Returns the size of the dpaa2_sec session structure */
1807 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1809 PMD_INIT_FUNC_TRACE();
1811 return sizeof(dpaa2_sec_session);
1815 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1816 struct rte_crypto_sym_xform *xform,
1817 dpaa2_sec_session *session)
1819 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1820 struct alginfo cipherdata;
1821 int bufsize, ret = 0;
1822 struct ctxt_priv *priv;
1823 struct sec_flow_context *flc;
1825 PMD_INIT_FUNC_TRACE();
1827 /* For SEC CIPHER only one descriptor is required. */
1828 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1829 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1830 RTE_CACHE_LINE_SIZE);
1832 DPAA2_SEC_ERR("No Memory for priv CTXT");
1836 priv->fle_pool = dev_priv->fle_pool;
1838 flc = &priv->flc_desc[0].flc;
1840 session->ctxt_type = DPAA2_SEC_CIPHER;
1841 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1842 RTE_CACHE_LINE_SIZE);
1843 if (session->cipher_key.data == NULL) {
1844 DPAA2_SEC_ERR("No Memory for cipher key");
1848 session->cipher_key.length = xform->cipher.key.length;
1850 memcpy(session->cipher_key.data, xform->cipher.key.data,
1851 xform->cipher.key.length);
1852 cipherdata.key = (size_t)session->cipher_key.data;
1853 cipherdata.keylen = session->cipher_key.length;
1854 cipherdata.key_enc_flags = 0;
1855 cipherdata.key_type = RTA_DATA_IMM;
1857 /* Set IV parameters */
1858 session->iv.offset = xform->cipher.iv.offset;
1859 session->iv.length = xform->cipher.iv.length;
1860 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1863 switch (xform->cipher.algo) {
1864 case RTE_CRYPTO_CIPHER_AES_CBC:
1865 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1866 cipherdata.algmode = OP_ALG_AAI_CBC;
1867 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1868 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1869 SHR_NEVER, &cipherdata,
1873 case RTE_CRYPTO_CIPHER_3DES_CBC:
1874 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1875 cipherdata.algmode = OP_ALG_AAI_CBC;
1876 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1877 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1878 SHR_NEVER, &cipherdata,
1882 case RTE_CRYPTO_CIPHER_DES_CBC:
1883 cipherdata.algtype = OP_ALG_ALGSEL_DES;
1884 cipherdata.algmode = OP_ALG_AAI_CBC;
1885 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1886 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1887 SHR_NEVER, &cipherdata,
1891 case RTE_CRYPTO_CIPHER_AES_CTR:
1892 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1893 cipherdata.algmode = OP_ALG_AAI_CTR;
1894 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1895 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1896 SHR_NEVER, &cipherdata,
1900 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1901 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1902 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1903 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1907 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1908 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1909 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1910 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1914 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1915 case RTE_CRYPTO_CIPHER_AES_F8:
1916 case RTE_CRYPTO_CIPHER_AES_ECB:
1917 case RTE_CRYPTO_CIPHER_3DES_ECB:
1918 case RTE_CRYPTO_CIPHER_3DES_CTR:
1919 case RTE_CRYPTO_CIPHER_AES_XTS:
1920 case RTE_CRYPTO_CIPHER_ARC4:
1921 case RTE_CRYPTO_CIPHER_NULL:
1922 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1923 xform->cipher.algo);
1927 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1928 xform->cipher.algo);
1934 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1939 flc->word1_sdl = (uint8_t)bufsize;
1940 session->ctxt = priv;
1942 #ifdef CAAM_DESC_DEBUG
1944 for (i = 0; i < bufsize; i++)
1945 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1950 rte_free(session->cipher_key.data);
1956 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1957 struct rte_crypto_sym_xform *xform,
1958 dpaa2_sec_session *session)
1960 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1961 struct alginfo authdata;
1962 int bufsize, ret = 0;
1963 struct ctxt_priv *priv;
1964 struct sec_flow_context *flc;
1966 PMD_INIT_FUNC_TRACE();
1968 /* For SEC AUTH three descriptors are required for various stages */
1969 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1970 sizeof(struct ctxt_priv) + 3 *
1971 sizeof(struct sec_flc_desc),
1972 RTE_CACHE_LINE_SIZE);
1974 DPAA2_SEC_ERR("No Memory for priv CTXT");
1978 priv->fle_pool = dev_priv->fle_pool;
1979 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1981 session->ctxt_type = DPAA2_SEC_AUTH;
1982 session->auth_key.length = xform->auth.key.length;
1983 if (xform->auth.key.length) {
1984 session->auth_key.data = rte_zmalloc(NULL,
1985 xform->auth.key.length,
1986 RTE_CACHE_LINE_SIZE);
1987 if (session->auth_key.data == NULL) {
1988 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1992 memcpy(session->auth_key.data, xform->auth.key.data,
1993 xform->auth.key.length);
1994 authdata.key = (size_t)session->auth_key.data;
1995 authdata.key_enc_flags = 0;
1996 authdata.key_type = RTA_DATA_IMM;
1998 authdata.keylen = session->auth_key.length;
2000 session->digest_length = xform->auth.digest_length;
2001 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2004 switch (xform->auth.algo) {
2005 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2006 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2007 authdata.algmode = OP_ALG_AAI_HMAC;
2008 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2009 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2010 1, 0, SHR_NEVER, &authdata,
2012 session->digest_length);
2014 case RTE_CRYPTO_AUTH_MD5_HMAC:
2015 authdata.algtype = OP_ALG_ALGSEL_MD5;
2016 authdata.algmode = OP_ALG_AAI_HMAC;
2017 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2018 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2019 1, 0, SHR_NEVER, &authdata,
2021 session->digest_length);
2023 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2024 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2025 authdata.algmode = OP_ALG_AAI_HMAC;
2026 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2027 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2028 1, 0, SHR_NEVER, &authdata,
2030 session->digest_length);
2032 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2033 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2034 authdata.algmode = OP_ALG_AAI_HMAC;
2035 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2036 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2037 1, 0, SHR_NEVER, &authdata,
2039 session->digest_length);
2041 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2042 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2043 authdata.algmode = OP_ALG_AAI_HMAC;
2044 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2045 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2046 1, 0, SHR_NEVER, &authdata,
2048 session->digest_length);
2050 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2051 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2052 authdata.algmode = OP_ALG_AAI_HMAC;
2053 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2054 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2055 1, 0, SHR_NEVER, &authdata,
2057 session->digest_length);
2059 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2060 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2061 authdata.algmode = OP_ALG_AAI_F9;
2062 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2063 session->iv.offset = xform->auth.iv.offset;
2064 session->iv.length = xform->auth.iv.length;
2065 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2068 session->digest_length);
2070 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2071 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2072 authdata.algmode = OP_ALG_AAI_F9;
2073 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2074 session->iv.offset = xform->auth.iv.offset;
2075 session->iv.length = xform->auth.iv.length;
2076 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2079 session->digest_length);
2081 case RTE_CRYPTO_AUTH_SHA1:
2082 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2083 authdata.algmode = OP_ALG_AAI_HASH;
2084 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2085 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2086 1, 0, SHR_NEVER, &authdata,
2088 session->digest_length);
2090 case RTE_CRYPTO_AUTH_MD5:
2091 authdata.algtype = OP_ALG_ALGSEL_MD5;
2092 authdata.algmode = OP_ALG_AAI_HASH;
2093 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2094 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2095 1, 0, SHR_NEVER, &authdata,
2097 session->digest_length);
2099 case RTE_CRYPTO_AUTH_SHA256:
2100 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2101 authdata.algmode = OP_ALG_AAI_HASH;
2102 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2103 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2104 1, 0, SHR_NEVER, &authdata,
2106 session->digest_length);
2108 case RTE_CRYPTO_AUTH_SHA384:
2109 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2110 authdata.algmode = OP_ALG_AAI_HASH;
2111 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2112 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2113 1, 0, SHR_NEVER, &authdata,
2115 session->digest_length);
2117 case RTE_CRYPTO_AUTH_SHA512:
2118 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2119 authdata.algmode = OP_ALG_AAI_HASH;
2120 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2121 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2122 1, 0, SHR_NEVER, &authdata,
2124 session->digest_length);
2126 case RTE_CRYPTO_AUTH_SHA224:
2127 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2128 authdata.algmode = OP_ALG_AAI_HASH;
2129 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2130 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2131 1, 0, SHR_NEVER, &authdata,
2133 session->digest_length);
2135 case RTE_CRYPTO_AUTH_AES_GMAC:
2136 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2137 case RTE_CRYPTO_AUTH_AES_CMAC:
2138 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2139 case RTE_CRYPTO_AUTH_KASUMI_F9:
2140 case RTE_CRYPTO_AUTH_NULL:
2141 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2146 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2153 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2158 flc->word1_sdl = (uint8_t)bufsize;
2159 session->ctxt = priv;
2160 #ifdef CAAM_DESC_DEBUG
2162 for (i = 0; i < bufsize; i++)
2163 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2164 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2170 rte_free(session->auth_key.data);
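/* Session setup for true AEAD transforms (AES-GCM only on this path):
 * copies the AEAD key into the session, chooses encap or decap
 * shared-descriptor construction from the operation direction, and uses
 * rta_inline_query() to decide whether the key is embedded immediately in
 * the descriptor or referenced by pointer.
 */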
2176 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2177 struct rte_crypto_sym_xform *xform,
2178 dpaa2_sec_session *session)
2180 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2181 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2182 struct alginfo aeaddata;
2184 struct ctxt_priv *priv;
2185 struct sec_flow_context *flc;
2186 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2189 PMD_INIT_FUNC_TRACE();
2191 /* Set IV parameters */
2192 session->iv.offset = aead_xform->iv.offset;
2193 session->iv.length = aead_xform->iv.length;
2194 session->ctxt_type = DPAA2_SEC_AEAD;
2196 /* For SEC AEAD only one descriptor is required */
2197 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2198 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2199 RTE_CACHE_LINE_SIZE);
2201 DPAA2_SEC_ERR("No Memory for priv CTXT");
2205 priv->fle_pool = dev_priv->fle_pool;
2206 flc = &priv->flc_desc[0].flc;
2208 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2209 RTE_CACHE_LINE_SIZE);
2210 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2211 DPAA2_SEC_ERR("No Memory for aead key");
2215 memcpy(session->aead_key.data, aead_xform->key.data,
2216 aead_xform->key.length);
2218 session->digest_length = aead_xform->digest_length;
2219 session->aead_key.length = aead_xform->key.length;
2220 ctxt->auth_only_len = aead_xform->aad_length;
2222 aeaddata.key = (size_t)session->aead_key.data;
2223 aeaddata.keylen = session->aead_key.length;
2224 aeaddata.key_enc_flags = 0;
2225 aeaddata.key_type = RTA_DATA_IMM;
2227 switch (aead_xform->algo) {
2228 case RTE_CRYPTO_AEAD_AES_GCM:
2229 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2230 aeaddata.algmode = OP_ALG_AAI_GCM;
2231 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2233 case RTE_CRYPTO_AEAD_AES_CCM:
2234 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2239 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2244 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2247 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2248 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2250 (unsigned int *)priv->flc_desc[0].desc,
2251 &priv->flc_desc[0].desc[1], 1);
2254 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2258 if (priv->flc_desc[0].desc[1] & 1) {
2259 aeaddata.key_type = RTA_DATA_IMM;
2261 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2262 aeaddata.key_type = RTA_DATA_PTR;
2264 priv->flc_desc[0].desc[0] = 0;
2265 priv->flc_desc[0].desc[1] = 0;
2267 if (session->dir == DIR_ENC)
2268 bufsize = cnstr_shdsc_gcm_encap(
2269 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2270 &aeaddata, session->iv.length,
2271 session->digest_length);
2273 bufsize = cnstr_shdsc_gcm_decap(
2274 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2275 &aeaddata, session->iv.length,
2276 session->digest_length);
2278 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2283 flc->word1_sdl = (uint8_t)bufsize;
2284 session->ctxt = priv;
2285 #ifdef CAAM_DESC_DEBUG
2287 for (i = 0; i < bufsize; i++)
2288 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2289 i, priv->flc_desc[0].desc[i]);
2294 rte_free(session->aead_key.data);
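/* Session setup for chained cipher + auth (non-protocol "authenc"):
 * selects the CIPHER_HASH or HASH_CIPHER context type from the xform order
 * and cipher direction, copies both keys, maps the algorithms to CAAM
 * OP_ALG_* selectors, and builds a single authenc shared descriptor. Only
 * the cipher-then-authenticate (CIPHER_HASH) direction is supported by
 * cnstr_shdsc_authenc() here.
 */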
2301 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2302 struct rte_crypto_sym_xform *xform,
2303 dpaa2_sec_session *session)
2305 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2306 struct alginfo authdata, cipherdata;
2308 struct ctxt_priv *priv;
2309 struct sec_flow_context *flc;
2310 struct rte_crypto_cipher_xform *cipher_xform;
2311 struct rte_crypto_auth_xform *auth_xform;
2314 PMD_INIT_FUNC_TRACE();
2316 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2317 cipher_xform = &xform->cipher;
2318 auth_xform = &xform->next->auth;
2319 session->ctxt_type =
2320 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2321 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2323 cipher_xform = &xform->next->cipher;
2324 auth_xform = &xform->auth;
2325 session->ctxt_type =
2326 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2327 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2330 /* Set IV parameters */
2331 session->iv.offset = cipher_xform->iv.offset;
2332 session->iv.length = cipher_xform->iv.length;
2334 /* For SEC AEAD only one descriptor is required */
2335 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2336 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2337 RTE_CACHE_LINE_SIZE);
2339 DPAA2_SEC_ERR("No Memory for priv CTXT");
2343 priv->fle_pool = dev_priv->fle_pool;
2344 flc = &priv->flc_desc[0].flc;
2346 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2347 RTE_CACHE_LINE_SIZE);
2348 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2349 DPAA2_SEC_ERR("No Memory for cipher key");
2353 session->cipher_key.length = cipher_xform->key.length;
2354 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2355 RTE_CACHE_LINE_SIZE);
2356 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2357 DPAA2_SEC_ERR("No Memory for auth key");
2358 rte_free(session->cipher_key.data);
2362 session->auth_key.length = auth_xform->key.length;
2363 memcpy(session->cipher_key.data, cipher_xform->key.data,
2364 cipher_xform->key.length);
2365 memcpy(session->auth_key.data, auth_xform->key.data,
2366 auth_xform->key.length);
2368 authdata.key = (size_t)session->auth_key.data;
2369 authdata.keylen = session->auth_key.length;
2370 authdata.key_enc_flags = 0;
2371 authdata.key_type = RTA_DATA_IMM;
2373 session->digest_length = auth_xform->digest_length;
2375 switch (auth_xform->algo) {
2376 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2377 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2378 authdata.algmode = OP_ALG_AAI_HMAC;
2379 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2381 case RTE_CRYPTO_AUTH_MD5_HMAC:
2382 authdata.algtype = OP_ALG_ALGSEL_MD5;
2383 authdata.algmode = OP_ALG_AAI_HMAC;
2384 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2386 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2387 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2388 authdata.algmode = OP_ALG_AAI_HMAC;
2389 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2391 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2392 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2393 authdata.algmode = OP_ALG_AAI_HMAC;
2394 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2396 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2397 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2398 authdata.algmode = OP_ALG_AAI_HMAC;
2399 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2401 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2402 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2403 authdata.algmode = OP_ALG_AAI_HMAC;
2404 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2406 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2407 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2408 case RTE_CRYPTO_AUTH_NULL:
2409 case RTE_CRYPTO_AUTH_SHA1:
2410 case RTE_CRYPTO_AUTH_SHA256:
2411 case RTE_CRYPTO_AUTH_SHA512:
2412 case RTE_CRYPTO_AUTH_SHA224:
2413 case RTE_CRYPTO_AUTH_SHA384:
2414 case RTE_CRYPTO_AUTH_MD5:
2415 case RTE_CRYPTO_AUTH_AES_GMAC:
2416 case RTE_CRYPTO_AUTH_KASUMI_F9:
2417 case RTE_CRYPTO_AUTH_AES_CMAC:
2418 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2419 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2420 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2425 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2430 cipherdata.key = (size_t)session->cipher_key.data;
2431 cipherdata.keylen = session->cipher_key.length;
2432 cipherdata.key_enc_flags = 0;
2433 cipherdata.key_type = RTA_DATA_IMM;
2435 switch (cipher_xform->algo) {
2436 case RTE_CRYPTO_CIPHER_AES_CBC:
2437 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2438 cipherdata.algmode = OP_ALG_AAI_CBC;
2439 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2441 case RTE_CRYPTO_CIPHER_3DES_CBC:
2442 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2443 cipherdata.algmode = OP_ALG_AAI_CBC;
2444 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2446 case RTE_CRYPTO_CIPHER_DES_CBC:
2447 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2448 cipherdata.algmode = OP_ALG_AAI_CBC;
2449 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2451 case RTE_CRYPTO_CIPHER_AES_CTR:
2452 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2453 cipherdata.algmode = OP_ALG_AAI_CTR;
2454 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2456 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2457 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2458 case RTE_CRYPTO_CIPHER_NULL:
2459 case RTE_CRYPTO_CIPHER_3DES_ECB:
2460 case RTE_CRYPTO_CIPHER_3DES_CTR:
2461 case RTE_CRYPTO_CIPHER_AES_ECB:
2462 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2463 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2464 cipher_xform->algo);
2468 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2469 cipher_xform->algo);
2473 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2476 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2477 priv->flc_desc[0].desc[1] = authdata.keylen;
2478 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2480 (unsigned int *)priv->flc_desc[0].desc,
2481 &priv->flc_desc[0].desc[2], 2);
2484 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2488 if (priv->flc_desc[0].desc[2] & 1) {
2489 cipherdata.key_type = RTA_DATA_IMM;
2491 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2492 cipherdata.key_type = RTA_DATA_PTR;
2494 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2495 authdata.key_type = RTA_DATA_IMM;
2497 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2498 authdata.key_type = RTA_DATA_PTR;
2500 priv->flc_desc[0].desc[0] = 0;
2501 priv->flc_desc[0].desc[1] = 0;
2502 priv->flc_desc[0].desc[2] = 0;
2504 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2505 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2507 &cipherdata, &authdata,
2509 session->digest_length,
2512 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2517 DPAA2_SEC_ERR("Hash before cipher not supported");
2522 flc->word1_sdl = (uint8_t)bufsize;
2523 session->ctxt = priv;
2524 #ifdef CAAM_DESC_DEBUG
2526 for (i = 0; i < bufsize; i++)
2527 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2528 i, priv->flc_desc[0].desc[i]);
2534 rte_free(session->cipher_key.data);
2535 rte_free(session->auth_key.data);
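/* Top-level symmetric session dispatcher: inspects the xform chain and
 * routes to cipher-only, auth-only, chained cipher/auth or AEAD setup. A
 * NULL cipher or NULL auth inside a chain degenerates to the corresponding
 * single-algorithm path.
 *
 * Illustrative sketch of the xform chain an application would pass in for
 * the cipher-then-authenticate case (field values such as hmac_key,
 * aes_key and IV_OFFSET are placeholders, not taken from this driver):
 *
 *   struct rte_crypto_sym_xform auth_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *       .auth = {
 *           .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *           .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *           .key = { .data = hmac_key, .length = 20 },
 *           .digest_length = 12,
 *       },
 *   };
 *   struct rte_crypto_sym_xform cipher_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *       .next = &auth_xf,
 *       .cipher = {
 *           .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *           .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *           .key = { .data = aes_key, .length = 16 },
 *           .iv = { .offset = IV_OFFSET, .length = 16 },
 *       },
 *   };
 *   Passing &cipher_xf to this function routes to
 *   dpaa2_sec_aead_chain_init().
 */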
2541 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2542 struct rte_crypto_sym_xform *xform, void *sess)
2544 dpaa2_sec_session *session = sess;
2547 PMD_INIT_FUNC_TRACE();
2549 if (unlikely(sess == NULL)) {
2550 DPAA2_SEC_ERR("Invalid session struct");
2554 memset(session, 0, sizeof(dpaa2_sec_session));
2555 /* Default IV length = 0 */
2556 session->iv.length = 0;
2559 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2560 ret = dpaa2_sec_cipher_init(dev, xform, session);
2562 /* Authentication Only */
2563 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2564 xform->next == NULL) {
2565 ret = dpaa2_sec_auth_init(dev, xform, session);
2567 /* Cipher then Authenticate */
2568 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2569 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2570 session->ext_params.aead_ctxt.auth_cipher_text = true;
2571 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2572 ret = dpaa2_sec_auth_init(dev, xform, session);
2573 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2574 ret = dpaa2_sec_cipher_init(dev, xform, session);
2576 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2577 /* Authenticate then Cipher */
2578 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2579 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2580 session->ext_params.aead_ctxt.auth_cipher_text = false;
2581 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2582 ret = dpaa2_sec_cipher_init(dev, xform, session);
2583 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2584 ret = dpaa2_sec_auth_init(dev, xform, session);
2586 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2587 /* AEAD operation for AES-GCM type algorithms */
2588 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2589 xform->next == NULL) {
2590 ret = dpaa2_sec_aead_init(dev, xform, session);
2593 DPAA2_SEC_ERR("Invalid crypto type");
2600 #ifdef RTE_LIBRTE_SECURITY
2602 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2603 dpaa2_sec_session *session,
2604 struct alginfo *aeaddata)
2606 PMD_INIT_FUNC_TRACE();
2608 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2609 RTE_CACHE_LINE_SIZE);
2610 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2611 DPAA2_SEC_ERR("No Memory for aead key");
2614 memcpy(session->aead_key.data, aead_xform->key.data,
2615 aead_xform->key.length);
2617 session->digest_length = aead_xform->digest_length;
2618 session->aead_key.length = aead_xform->key.length;
2620 aeaddata->key = (size_t)session->aead_key.data;
2621 aeaddata->keylen = session->aead_key.length;
2622 aeaddata->key_enc_flags = 0;
2623 aeaddata->key_type = RTA_DATA_IMM;
2625 switch (aead_xform->algo) {
2626 case RTE_CRYPTO_AEAD_AES_GCM:
2627 switch (session->digest_length) {
2629 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2632 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2635 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2638 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2639 session->digest_length);
2642 aeaddata->algmode = OP_ALG_AAI_GCM;
2643 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2645 case RTE_CRYPTO_AEAD_AES_CCM:
2646 switch (session->digest_length) {
2648 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2651 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2654 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2657 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2658 session->digest_length);
2661 aeaddata->algmode = OP_ALG_AAI_CCM;
2662 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2665 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2669 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2676 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2677 struct rte_crypto_auth_xform *auth_xform,
2678 dpaa2_sec_session *session,
2679 struct alginfo *cipherdata,
2680 struct alginfo *authdata)
2683 session->cipher_key.data = rte_zmalloc(NULL,
2684 cipher_xform->key.length,
2685 RTE_CACHE_LINE_SIZE);
2686 if (session->cipher_key.data == NULL &&
2687 cipher_xform->key.length > 0) {
2688 DPAA2_SEC_ERR("No Memory for cipher key");
2692 session->cipher_key.length = cipher_xform->key.length;
2693 memcpy(session->cipher_key.data, cipher_xform->key.data,
2694 cipher_xform->key.length);
2695 session->cipher_alg = cipher_xform->algo;
2697 session->cipher_key.data = NULL;
2698 session->cipher_key.length = 0;
2699 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2703 session->auth_key.data = rte_zmalloc(NULL,
2704 auth_xform->key.length,
2705 RTE_CACHE_LINE_SIZE);
2706 if (session->auth_key.data == NULL &&
2707 auth_xform->key.length > 0) {
2708 DPAA2_SEC_ERR("No Memory for auth key");
2711 session->auth_key.length = auth_xform->key.length;
2712 memcpy(session->auth_key.data, auth_xform->key.data,
2713 auth_xform->key.length);
2714 session->auth_alg = auth_xform->algo;
2715 session->digest_length = auth_xform->digest_length;
2717 session->auth_key.data = NULL;
2718 session->auth_key.length = 0;
2719 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2722 authdata->key = (size_t)session->auth_key.data;
2723 authdata->keylen = session->auth_key.length;
2724 authdata->key_enc_flags = 0;
2725 authdata->key_type = RTA_DATA_IMM;
2726 switch (session->auth_alg) {
2727 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2728 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2729 authdata->algmode = OP_ALG_AAI_HMAC;
2731 case RTE_CRYPTO_AUTH_MD5_HMAC:
2732 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2733 authdata->algmode = OP_ALG_AAI_HMAC;
2735 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2736 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2737 authdata->algmode = OP_ALG_AAI_HMAC;
2738 if (session->digest_length != 16)
2740 "Using sha256-hmac with a truncated digest length is non-standard,"
2741 " it will not work with lookaside proto");
2743 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2744 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2745 authdata->algmode = OP_ALG_AAI_HMAC;
2747 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2748 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2749 authdata->algmode = OP_ALG_AAI_HMAC;
2751 case RTE_CRYPTO_AUTH_AES_CMAC:
2752 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2754 case RTE_CRYPTO_AUTH_NULL:
2755 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2757 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2758 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2759 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2760 case RTE_CRYPTO_AUTH_SHA1:
2761 case RTE_CRYPTO_AUTH_SHA256:
2762 case RTE_CRYPTO_AUTH_SHA512:
2763 case RTE_CRYPTO_AUTH_SHA224:
2764 case RTE_CRYPTO_AUTH_SHA384:
2765 case RTE_CRYPTO_AUTH_MD5:
2766 case RTE_CRYPTO_AUTH_AES_GMAC:
2767 case RTE_CRYPTO_AUTH_KASUMI_F9:
2768 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2769 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2770 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2774 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2778 cipherdata->key = (size_t)session->cipher_key.data;
2779 cipherdata->keylen = session->cipher_key.length;
2780 cipherdata->key_enc_flags = 0;
2781 cipherdata->key_type = RTA_DATA_IMM;
2783 switch (session->cipher_alg) {
2784 case RTE_CRYPTO_CIPHER_AES_CBC:
2785 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2786 cipherdata->algmode = OP_ALG_AAI_CBC;
2788 case RTE_CRYPTO_CIPHER_3DES_CBC:
2789 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2790 cipherdata->algmode = OP_ALG_AAI_CBC;
2792 case RTE_CRYPTO_CIPHER_DES_CBC:
2793 cipherdata->algtype = OP_PCL_IPSEC_DES;
2794 cipherdata->algmode = OP_ALG_AAI_CBC;
2796 case RTE_CRYPTO_CIPHER_AES_CTR:
2797 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2798 cipherdata->algmode = OP_ALG_AAI_CTR;
2800 case RTE_CRYPTO_CIPHER_NULL:
2801 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2803 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2804 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2805 case RTE_CRYPTO_CIPHER_3DES_ECB:
2806 case RTE_CRYPTO_CIPHER_3DES_CTR:
2807 case RTE_CRYPTO_CIPHER_AES_ECB:
2808 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2809 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2810 session->cipher_alg);
2813 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2814 session->cipher_alg);
2821 #ifdef RTE_LIBRTE_SECURITY_TEST
2822 static uint8_t aes_cbc_iv[] = {
2823 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2824 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
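/* Lookaside-protocol IPsec session setup: translates the security session
 * conf into CAAM alginfo via dpaa2_sec_ipsec_proto_init() or
 * dpaa2_sec_ipsec_aead_init(), then builds an encap PDB (including a
 * prebuilt outer IPv4/IPv6 tunnel header) for egress or a decap PDB (with
 * an optional anti-replay window) for ingress, and finally programs the
 * flow context to stash completions to the rx queue of queue pair 0.
 */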
2828 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2829 struct rte_security_session_conf *conf,
2832 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2833 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2834 struct rte_crypto_auth_xform *auth_xform = NULL;
2835 struct rte_crypto_aead_xform *aead_xform = NULL;
2836 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2837 struct ctxt_priv *priv;
2838 struct alginfo authdata, cipherdata;
2840 struct sec_flow_context *flc;
2841 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2844 PMD_INIT_FUNC_TRACE();
2846 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2847 sizeof(struct ctxt_priv) +
2848 sizeof(struct sec_flc_desc),
2849 RTE_CACHE_LINE_SIZE);
2852 DPAA2_SEC_ERR("No memory for priv CTXT");
2856 priv->fle_pool = dev_priv->fle_pool;
2857 flc = &priv->flc_desc[0].flc;
2859 memset(session, 0, sizeof(dpaa2_sec_session));
2861 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2862 cipher_xform = &conf->crypto_xform->cipher;
2863 if (conf->crypto_xform->next)
2864 auth_xform = &conf->crypto_xform->next->auth;
2865 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2866 session, &cipherdata, &authdata);
2867 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2868 auth_xform = &conf->crypto_xform->auth;
2869 if (conf->crypto_xform->next)
2870 cipher_xform = &conf->crypto_xform->next->cipher;
2871 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2872 session, &cipherdata, &authdata);
2873 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2874 aead_xform = &conf->crypto_xform->aead;
2875 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2876 session, &cipherdata);
2877 authdata.keylen = 0;
2878 authdata.algtype = 0;
2880 DPAA2_SEC_ERR("XFORM not specified");
2885 DPAA2_SEC_ERR("Failed to process xform");
2889 session->ctxt_type = DPAA2_SEC_IPSEC;
2890 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2891 uint8_t *hdr = NULL;
2893 struct rte_ipv6_hdr ip6_hdr;
2894 struct ipsec_encap_pdb encap_pdb;
2896 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2897 /* For Sec Proto only one descriptor is required. */
2898 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2900 /* copy algo specific data to PDB */
2901 switch (cipherdata.algtype) {
2902 case OP_PCL_IPSEC_AES_CTR:
2903 encap_pdb.ctr.ctr_initial = 0x00000001;
2904 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2906 case OP_PCL_IPSEC_AES_GCM8:
2907 case OP_PCL_IPSEC_AES_GCM12:
2908 case OP_PCL_IPSEC_AES_GCM16:
2909 memcpy(encap_pdb.gcm.salt,
2910 (uint8_t *)&(ipsec_xform->salt), 4);
2914 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2915 PDBOPTS_ESP_OIHI_PDB_INL |
2917 PDBHMO_ESP_ENCAP_DTTL |
2919 if (ipsec_xform->options.esn)
2920 encap_pdb.options |= PDBOPTS_ESP_ESN;
2921 encap_pdb.spi = ipsec_xform->spi;
2922 session->dir = DIR_ENC;
2923 if (ipsec_xform->tunnel.type ==
2924 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2925 encap_pdb.ip_hdr_len = sizeof(struct ip);
2926 ip4_hdr.ip_v = IPVERSION;
2928 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2929 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2932 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2933 ip4_hdr.ip_p = IPPROTO_ESP;
2935 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2936 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2937 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2938 &ip4_hdr, sizeof(struct ip));
2939 hdr = (uint8_t *)&ip4_hdr;
2940 } else if (ipsec_xform->tunnel.type ==
2941 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2942 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2943 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2944 ((ipsec_xform->tunnel.ipv6.dscp <<
2945 RTE_IPV6_HDR_TC_SHIFT) &
2946 RTE_IPV6_HDR_TC_MASK) |
2947 ((ipsec_xform->tunnel.ipv6.flabel <<
2948 RTE_IPV6_HDR_FL_SHIFT) &
2949 RTE_IPV6_HDR_FL_MASK));
2950 /* Payload length will be updated by HW */
2951 ip6_hdr.payload_len = 0;
2952 ip6_hdr.hop_limits =
2953 ipsec_xform->tunnel.ipv6.hlimit;
2954 ip6_hdr.proto = (ipsec_xform->proto ==
2955 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2956 IPPROTO_ESP : IPPROTO_AH;
2957 memcpy(&ip6_hdr.src_addr,
2958 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2959 memcpy(&ip6_hdr.dst_addr,
2960 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2961 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2962 hdr = (uint8_t *)&ip6_hdr;
2965 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2966 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2967 SHR_WAIT : SHR_SERIAL, &encap_pdb,
2968 hdr, &cipherdata, &authdata);
2969 } else if (ipsec_xform->direction ==
2970 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2971 struct ipsec_decap_pdb decap_pdb;
2973 flc->dhr = SEC_FLC_DHR_INBOUND;
2974 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2975 /* copy algo specific data to PDB */
2976 switch (cipherdata.algtype) {
2977 case OP_PCL_IPSEC_AES_CTR:
2978 decap_pdb.ctr.ctr_initial = 0x00000001;
2979 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2981 case OP_PCL_IPSEC_AES_GCM8:
2982 case OP_PCL_IPSEC_AES_GCM12:
2983 case OP_PCL_IPSEC_AES_GCM16:
2984 memcpy(decap_pdb.gcm.salt,
2985 (uint8_t *)&(ipsec_xform->salt), 4);
2989 decap_pdb.options = (ipsec_xform->tunnel.type ==
2990 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2991 sizeof(struct ip) << 16 :
2992 sizeof(struct rte_ipv6_hdr) << 16;
2993 if (ipsec_xform->options.esn)
2994 decap_pdb.options |= PDBOPTS_ESP_ESN;
2996 if (ipsec_xform->replay_win_sz) {
2998 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3000 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3001 DPAA2_SEC_INFO("Max anti-replay window size = 128");
3011 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3014 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3017 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3020 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3023 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3027 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3030 session->dir = DIR_DEC;
3031 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3032 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3033 SHR_WAIT : SHR_SERIAL,
3034 &decap_pdb, &cipherdata, &authdata);
3039 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3043 flc->word1_sdl = (uint8_t)bufsize;
3045 /* Enable the stashing control bit */
3046 DPAA2_SET_FLC_RSC(flc);
3047 flc->word2_rflc_31_0 = lower_32_bits(
3048 (size_t)&(((struct dpaa2_sec_qp *)
3049 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3050 flc->word3_rflc_63_32 = upper_32_bits(
3051 (size_t)&(((struct dpaa2_sec_qp *)
3052 dev->data->queue_pairs[0])->rx_vq));
3054 /* Set EWS bit i.e. enable write-safe */
3055 DPAA2_SET_FLC_EWS(flc);
3056 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3057 DPAA2_SET_FLC_REUSE_BS(flc);
3058 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3059 DPAA2_SET_FLC_REUSE_FF(flc);
3061 session->ctxt = priv;
3065 rte_free(session->auth_key.data);
3066 rte_free(session->cipher_key.data);
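/* PDCP session setup: extracts the cipher/auth xforms, stores the PDCP
 * parameters (domain, bearer, packet direction, SN size, HFN and the
 * HFN-override offset), maps the algorithms to the PDCP_CIPHER_TYPE_ and
 * PDCP_AUTH_TYPE_ selectors, and constructs a control-plane or user-plane
 * (optionally SDAP-aware) shared descriptor. Integrity protection is
 * mandatory for control-plane sessions.
 */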
3072 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3073 struct rte_security_session_conf *conf,
3076 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3077 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3078 struct rte_crypto_auth_xform *auth_xform = NULL;
3079 struct rte_crypto_cipher_xform *cipher_xform;
3080 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3081 struct ctxt_priv *priv;
3082 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3083 struct alginfo authdata, cipherdata;
3084 struct alginfo *p_authdata = NULL;
3086 struct sec_flow_context *flc;
3087 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3093 PMD_INIT_FUNC_TRACE();
3095 memset(session, 0, sizeof(dpaa2_sec_session));
3097 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3098 sizeof(struct ctxt_priv) +
3099 sizeof(struct sec_flc_desc),
3100 RTE_CACHE_LINE_SIZE);
3103 DPAA2_SEC_ERR("No memory for priv CTXT");
3107 priv->fle_pool = dev_priv->fle_pool;
3108 flc = &priv->flc_desc[0].flc;
3110 /* find xfrm types */
3111 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
3112 cipher_xform = &xform->cipher;
3113 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
3114 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3115 session->ext_params.aead_ctxt.auth_cipher_text = true;
3116 cipher_xform = &xform->cipher;
3117 auth_xform = &xform->next->auth;
3118 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
3119 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3120 session->ext_params.aead_ctxt.auth_cipher_text = false;
3121 cipher_xform = &xform->next->cipher;
3122 auth_xform = &xform->auth;
3124 DPAA2_SEC_ERR("Invalid crypto type");
3128 session->ctxt_type = DPAA2_SEC_PDCP;
3130 session->cipher_key.data = rte_zmalloc(NULL,
3131 cipher_xform->key.length,
3132 RTE_CACHE_LINE_SIZE);
3133 if (session->cipher_key.data == NULL &&
3134 cipher_xform->key.length > 0) {
3135 DPAA2_SEC_ERR("No Memory for cipher key");
3139 session->cipher_key.length = cipher_xform->key.length;
3140 memcpy(session->cipher_key.data, cipher_xform->key.data,
3141 cipher_xform->key.length);
3143 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3145 session->cipher_alg = cipher_xform->algo;
3147 session->cipher_key.data = NULL;
3148 session->cipher_key.length = 0;
3149 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3150 session->dir = DIR_ENC;
3153 session->pdcp.domain = pdcp_xform->domain;
3154 session->pdcp.bearer = pdcp_xform->bearer;
3155 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3156 session->pdcp.sn_size = pdcp_xform->sn_size;
3157 session->pdcp.hfn = pdcp_xform->hfn;
3158 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3159 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3160 /* hfn override (ovd) offset location is stored in iv.offset value */
3161 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3163 cipherdata.key = (size_t)session->cipher_key.data;
3164 cipherdata.keylen = session->cipher_key.length;
3165 cipherdata.key_enc_flags = 0;
3166 cipherdata.key_type = RTA_DATA_IMM;
3168 switch (session->cipher_alg) {
3169 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3170 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3172 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3173 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3175 case RTE_CRYPTO_CIPHER_AES_CTR:
3176 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3178 case RTE_CRYPTO_CIPHER_NULL:
3179 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3182 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3183 session->cipher_alg);
3188 session->auth_key.data = rte_zmalloc(NULL,
3189 auth_xform->key.length,
3190 RTE_CACHE_LINE_SIZE);
3191 if (!session->auth_key.data &&
3192 auth_xform->key.length > 0) {
3193 DPAA2_SEC_ERR("No Memory for auth key");
3194 rte_free(session->cipher_key.data);
3198 session->auth_key.length = auth_xform->key.length;
3199 memcpy(session->auth_key.data, auth_xform->key.data,
3200 auth_xform->key.length);
3201 session->auth_alg = auth_xform->algo;
3203 session->auth_key.data = NULL;
3204 session->auth_key.length = 0;
3205 session->auth_alg = 0;
3207 authdata.key = (size_t)session->auth_key.data;
3208 authdata.keylen = session->auth_key.length;
3209 authdata.key_enc_flags = 0;
3210 authdata.key_type = RTA_DATA_IMM;
3212 if (session->auth_alg) {
3213 switch (session->auth_alg) {
3214 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3215 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3217 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3218 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3220 case RTE_CRYPTO_AUTH_AES_CMAC:
3221 authdata.algtype = PDCP_AUTH_TYPE_AES;
3223 case RTE_CRYPTO_AUTH_NULL:
3224 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3227 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3232 p_authdata = &authdata;
3233 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3234 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3238 if (rta_inline_pdcp_query(authdata.algtype,
3240 session->pdcp.sn_size,
3241 session->pdcp.hfn_ovd)) {
3242 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3243 cipherdata.key_type = RTA_DATA_PTR;
3246 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3247 if (session->dir == DIR_ENC)
3248 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3249 priv->flc_desc[0].desc, 1, swap,
3251 session->pdcp.sn_size,
3253 pdcp_xform->pkt_dir,
3254 pdcp_xform->hfn_threshold,
3255 &cipherdata, &authdata,
3257 else if (session->dir == DIR_DEC)
3258 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3259 priv->flc_desc[0].desc, 1, swap,
3261 session->pdcp.sn_size,
3263 pdcp_xform->pkt_dir,
3264 pdcp_xform->hfn_threshold,
3265 &cipherdata, &authdata,
3268 if (session->dir == DIR_ENC) {
3269 if (pdcp_xform->sdap_enabled)
3270 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3271 priv->flc_desc[0].desc, 1, swap,
3272 session->pdcp.sn_size,
3275 pdcp_xform->pkt_dir,
3276 pdcp_xform->hfn_threshold,
3277 &cipherdata, p_authdata, 0);
3279 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3280 priv->flc_desc[0].desc, 1, swap,
3281 session->pdcp.sn_size,
3284 pdcp_xform->pkt_dir,
3285 pdcp_xform->hfn_threshold,
3286 &cipherdata, p_authdata, 0);
3287 } else if (session->dir == DIR_DEC) {
3288 if (pdcp_xform->sdap_enabled)
3289 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3290 priv->flc_desc[0].desc, 1, swap,
3291 session->pdcp.sn_size,
3294 pdcp_xform->pkt_dir,
3295 pdcp_xform->hfn_threshold,
3296 &cipherdata, p_authdata, 0);
3298 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3299 priv->flc_desc[0].desc, 1, swap,
3300 session->pdcp.sn_size,
3303 pdcp_xform->pkt_dir,
3304 pdcp_xform->hfn_threshold,
3305 &cipherdata, p_authdata, 0);
3310 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3314 /* Enable the stashing control bit */
3315 DPAA2_SET_FLC_RSC(flc);
3316 flc->word2_rflc_31_0 = lower_32_bits(
3317 (size_t)&(((struct dpaa2_sec_qp *)
3318 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3319 flc->word3_rflc_63_32 = upper_32_bits(
3320 (size_t)&(((struct dpaa2_sec_qp *)
3321 dev->data->queue_pairs[0])->rx_vq));
3323 flc->word1_sdl = (uint8_t)bufsize;
3325 /* TODO - check the perf impact or
3326 * align as per descriptor type
3327 * Set EWS bit i.e. enable write-safe
3328 * DPAA2_SET_FLC_EWS(flc);
3331 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3332 DPAA2_SET_FLC_REUSE_BS(flc);
3333 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3334 DPAA2_SET_FLC_REUSE_FF(flc);
3336 session->ctxt = priv;
3340 rte_free(session->auth_key.data);
3341 rte_free(session->cipher_key.data);
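/* rte_security session_create entry point: takes a private session object
 * from the caller-supplied mempool and dispatches on conf->protocol to the
 * IPsec or PDCP session setup above (MACsec is not supported).
 *
 * Illustrative sketch of the conf an application might supply for IPsec
 * lookaside protocol offload (values are placeholders, not taken from this
 * driver; exact struct contents depend on the DPDK release):
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *       .ipsec = {
 *           .spi = 0x1234,
 *           .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *           .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *           .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *       },
 *       .crypto_xform = &cipher_xf,   (hypothetical xform chain)
 *   };
 */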
3347 dpaa2_sec_security_session_create(void *dev,
3348 struct rte_security_session_conf *conf,
3349 struct rte_security_session *sess,
3350 struct rte_mempool *mempool)
3352 void *sess_private_data;
3353 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3356 if (rte_mempool_get(mempool, &sess_private_data)) {
3357 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3361 switch (conf->protocol) {
3362 case RTE_SECURITY_PROTOCOL_IPSEC:
3363 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3366 case RTE_SECURITY_PROTOCOL_MACSEC:
3368 case RTE_SECURITY_PROTOCOL_PDCP:
3369 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3376 DPAA2_SEC_ERR("Failed to configure session parameters");
3377 /* Return session to mempool */
3378 rte_mempool_put(mempool, sess_private_data);
3382 set_sec_session_private_data(sess, sess_private_data);
3387 /** Clear the memory of session so it doesn't leave key material behind */
3389 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3390 struct rte_security_session *sess)
3392 PMD_INIT_FUNC_TRACE();
3393 void *sess_priv = get_sec_session_private_data(sess);
3395 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3398 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3401 rte_free(s->cipher_key.data);
3402 rte_free(s->auth_key.data);
3403 memset(s, 0, sizeof(dpaa2_sec_session));
3404 set_sec_session_private_data(sess, NULL);
3405 rte_mempool_put(sess_mp, sess_priv);
3411 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3412 struct rte_crypto_sym_xform *xform,
3413 struct rte_cryptodev_sym_session *sess,
3414 struct rte_mempool *mempool)
3416 void *sess_private_data;
3419 if (rte_mempool_get(mempool, &sess_private_data)) {
3420 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3424 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3426 DPAA2_SEC_ERR("Failed to configure session parameters");
3427 /* Return session to mempool */
3428 rte_mempool_put(mempool, sess_private_data);
3432 set_sym_session_private_data(sess, dev->driver_id,
3438 /** Clear the memory of session so it doesn't leave key material behind */
3440 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3441 struct rte_cryptodev_sym_session *sess)
3443 PMD_INIT_FUNC_TRACE();
3444 uint8_t index = dev->driver_id;
3445 void *sess_priv = get_sym_session_private_data(sess, index);
3446 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3450 rte_free(s->cipher_key.data);
3451 rte_free(s->auth_key.data);
3452 memset(s, 0, sizeof(dpaa2_sec_session));
3453 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3454 set_sym_session_private_data(sess, index, NULL);
3455 rte_mempool_put(sess_mp, sess_priv);
3460 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3461 struct rte_cryptodev_config *config __rte_unused)
3463 PMD_INIT_FUNC_TRACE();
3469 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3471 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3472 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3473 struct dpseci_attr attr;
3474 struct dpaa2_queue *dpaa2_q;
3475 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3476 dev->data->queue_pairs;
3477 struct dpseci_rx_queue_attr rx_attr;
3478 struct dpseci_tx_queue_attr tx_attr;
3481 PMD_INIT_FUNC_TRACE();
3483 memset(&attr, 0, sizeof(struct dpseci_attr));
3485 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3487 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3489 goto get_attr_failure;
3491 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3493 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3494 goto get_attr_failure;
3496 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3497 dpaa2_q = &qp[i]->rx_vq;
3498 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3500 dpaa2_q->fqid = rx_attr.fqid;
3501 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3503 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3504 dpaa2_q = &qp[i]->tx_vq;
3505 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3507 dpaa2_q->fqid = tx_attr.fqid;
3508 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3513 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3518 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3520 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3521 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3524 PMD_INIT_FUNC_TRACE();
3526 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3528 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3533 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3535 DPAA2_SEC_ERR("SEC Device cannot be reset: Error = 0x%x", ret);
3541 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3543 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3544 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3547 PMD_INIT_FUNC_TRACE();
3549 /* Function is reverse of dpaa2_sec_dev_init.
3550 * It does the following:
3551 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3552 * 2. Close the DPSECI device
3553 * 3. Free the allocated resources.
3556 /* Close the device at underlying layer */
3557 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3559 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3563 /* Free the allocated memory for device private data and dpseci */
3571 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3572 struct rte_cryptodev_info *info)
3574 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3576 PMD_INIT_FUNC_TRACE();
3578 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3579 info->feature_flags = dev->feature_flags;
3580 info->capabilities = dpaa2_sec_capabilities;
3581 /* No limit of number of sessions */
3582 info->sym.max_nb_sessions = 0;
3583 info->driver_id = cryptodev_driver_id;
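/* Statistics: per queue-pair software counters (enqueued, dequeued and
 * errored packets) are summed from the tx_vq/rx_vq bookkeeping, then the
 * hardware counters are read with dpseci_get_sec_counters() and reported
 * at INFO level.
 */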
3588 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3589 struct rte_cryptodev_stats *stats)
3591 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3592 struct fsl_mc_io dpseci;
3593 struct dpseci_sec_counters counters = {0};
3594 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3595 dev->data->queue_pairs;
3598 PMD_INIT_FUNC_TRACE();
3599 if (stats == NULL) {
3600 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3603 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3604 if (qp == NULL || qp[i] == NULL) {
3605 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3609 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3610 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3611 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3612 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3615 /* In case a secondary process accesses stats, the MCP portal in priv->hw
3616 * may hold the primary process address. Need the secondary process
3617 * based MCP portal address for this object.
3619 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3620 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3623 DPAA2_SEC_ERR("SEC counters failed");
3625 DPAA2_SEC_INFO("dpseci hardware stats:"
3626 "\n\tNum of Requests Dequeued = %" PRIu64
3627 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3628 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3629 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3630 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3631 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3632 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3633 counters.dequeued_requests,
3634 counters.ob_enc_requests,
3635 counters.ib_dec_requests,
3636 counters.ob_enc_bytes,
3637 counters.ob_prot_bytes,
3638 counters.ib_dec_bytes,
3639 counters.ib_valid_bytes);
3644 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3647 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3648 (dev->data->queue_pairs);
3650 PMD_INIT_FUNC_TRACE();
3652 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3653 if (qp[i] == NULL) {
3654 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3657 qp[i]->tx_vq.rx_pkts = 0;
3658 qp[i]->tx_vq.tx_pkts = 0;
3659 qp[i]->tx_vq.err_pkts = 0;
3660 qp[i]->rx_vq.rx_pkts = 0;
3661 qp[i]->rx_vq.tx_pkts = 0;
3662 qp[i]->rx_vq.err_pkts = 0;
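/* Event-mode dequeue callbacks: invoked from the DPIO portal when a SEC
 * response frame is delivered through a DPCON. The parallel variant simply
 * consumes the DQRR entry; the atomic variant additionally records the
 * DQRR index in the mbuf and the per-lcore DQRR state so ordering can be
 * restored on enqueue.
 */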
3666 static void __rte_hot
3667 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3668 const struct qbman_fd *fd,
3669 const struct qbman_result *dq,
3670 struct dpaa2_queue *rxq,
3671 struct rte_event *ev)
3673 /* Prefetching mbuf */
3674 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3675 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3677 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3678 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3680 ev->flow_id = rxq->ev.flow_id;
3681 ev->sub_event_type = rxq->ev.sub_event_type;
3682 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3683 ev->op = RTE_EVENT_OP_NEW;
3684 ev->sched_type = rxq->ev.sched_type;
3685 ev->queue_id = rxq->ev.queue_id;
3686 ev->priority = rxq->ev.priority;
3687 ev->event_ptr = sec_fd_to_mbuf(fd);
3689 qbman_swp_dqrr_consume(swp, dq);
3692 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3693 const struct qbman_fd *fd,
3694 const struct qbman_result *dq,
3695 struct dpaa2_queue *rxq,
3696 struct rte_event *ev)
3699 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3700 /* Prefetching mbuf */
3701 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3702 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3704 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3705 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3707 ev->flow_id = rxq->ev.flow_id;
3708 ev->sub_event_type = rxq->ev.sub_event_type;
3709 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3710 ev->op = RTE_EVENT_OP_NEW;
3711 ev->sched_type = rxq->ev.sched_type;
3712 ev->queue_id = rxq->ev.queue_id;
3713 ev->priority = rxq->ev.priority;
3715 ev->event_ptr = sec_fd_to_mbuf(fd);
3716 dqrr_index = qbman_get_dqrr_idx(dq);
3717 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3718 DPAA2_PER_LCORE_DQRR_SIZE++;
3719 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3720 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
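/* Attach a SEC rx queue to an event queue: selects the parallel or atomic
 * callback above, scales the event priority to the DPCON priority range,
 * and reprograms the DPSECI rx queue destination (with order preservation
 * enabled for atomic scheduling).
 */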
3724 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3726 struct dpaa2_dpcon_dev *dpcon,
3727 const struct rte_event *event)
3729 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3730 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3731 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3732 struct dpseci_rx_queue_cfg cfg;
3736 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3737 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3738 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3739 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3743 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3744 (dpcon->num_priorities - 1);
3746 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3747 cfg.options = DPSECI_QUEUE_OPT_DEST;
3748 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3749 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3750 cfg.dest_cfg.priority = priority;
3752 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3753 cfg.user_ctx = (size_t)(qp);
3754 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3755 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3756 cfg.order_preservation_en = 1;
3758 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3761 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3765 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3771 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3774 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3775 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3776 struct dpseci_rx_queue_cfg cfg;
3779 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3780 cfg.options = DPSECI_QUEUE_OPT_DEST;
3781 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3783 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3786 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3791 static struct rte_cryptodev_ops crypto_ops = {
3792 .dev_configure = dpaa2_sec_dev_configure,
3793 .dev_start = dpaa2_sec_dev_start,
3794 .dev_stop = dpaa2_sec_dev_stop,
3795 .dev_close = dpaa2_sec_dev_close,
3796 .dev_infos_get = dpaa2_sec_dev_infos_get,
3797 .stats_get = dpaa2_sec_stats_get,
3798 .stats_reset = dpaa2_sec_stats_reset,
3799 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3800 .queue_pair_release = dpaa2_sec_queue_pair_release,
3801 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3802 .sym_session_configure = dpaa2_sec_sym_session_configure,
3803 .sym_session_clear = dpaa2_sec_sym_session_clear,
3806 #ifdef RTE_LIBRTE_SECURITY
3807 static const struct rte_security_capability *
3808 dpaa2_sec_capabilities_get(void *device __rte_unused)
3810 return dpaa2_sec_security_cap;
3813 static const struct rte_security_ops dpaa2_sec_security_ops = {
3814 .session_create = dpaa2_sec_security_session_create,
3815 .session_update = NULL,
3816 .session_stats_get = NULL,
3817 .session_destroy = dpaa2_sec_security_session_destroy,
3818 .set_pkt_metadata = NULL,
3819 .capabilities_get = dpaa2_sec_capabilities_get
3824 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3826 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3828 rte_free(dev->security_ctx);
3830 rte_mempool_free(internals->fle_pool);
3832 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3833 dev->data->name, rte_socket_id());
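/* Device initialization: binds the cryptodev ops and burst functions,
 * advertises the feature flags, and (in the primary process only) sets up
 * the rte_security context, opens the DPSECI object through the MC portal,
 * reads its attributes to size the queue pairs, and creates the per-device
 * FLE mempool used for frame-list descriptors.
 */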
3839 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3841 struct dpaa2_sec_dev_private *internals;
3842 struct rte_device *dev = cryptodev->device;
3843 struct rte_dpaa2_device *dpaa2_dev;
3844 #ifdef RTE_LIBRTE_SECURITY
3845 struct rte_security_ctx *security_instance;
3847 struct fsl_mc_io *dpseci;
3849 struct dpseci_attr attr;
3853 PMD_INIT_FUNC_TRACE();
3854 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3855 hw_id = dpaa2_dev->object_id;
3857 cryptodev->driver_id = cryptodev_driver_id;
3858 cryptodev->dev_ops = &crypto_ops;
3860 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3861 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3862 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3863 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3864 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3865 RTE_CRYPTODEV_FF_SECURITY |
3866 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3867 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3868 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3869 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3870 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3872 internals = cryptodev->data->dev_private;
3875 * For secondary processes, we don't initialise any further as primary
3876 * has already done this work. Only check we don't need a different
3879 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3880 DPAA2_SEC_DEBUG("Device already init by primary process");
3883 #ifdef RTE_LIBRTE_SECURITY
3884 /* Initialize security_ctx only for primary process*/
3885 security_instance = rte_malloc("rte_security_instances_ops",
3886 sizeof(struct rte_security_ctx), 0);
3887 if (security_instance == NULL)
3889 security_instance->device = (void *)cryptodev;
3890 security_instance->ops = &dpaa2_sec_security_ops;
3891 security_instance->sess_cnt = 0;
3892 cryptodev->security_ctx = security_instance;
3894 /* Open the rte device via MC and save the handle for further use */
3895 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3896 sizeof(struct fsl_mc_io), 0);
3899 "Error in allocating the memory for dpsec object");
3902 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3904 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3906 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3910 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3913 "Cannot get dpsec device attributes: Error = %x",
3917 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3920 internals->max_nb_queue_pairs = attr.num_tx_queues;
3921 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3922 internals->hw = dpseci;
3923 internals->token = token;
3925 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3926 getpid(), cryptodev->data->dev_id);
3927 internals->fle_pool = rte_mempool_create((const char *)str,
3930 FLE_POOL_CACHE_SIZE, 0,
3931 NULL, NULL, NULL, NULL,
3933 if (!internals->fle_pool) {
3934 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3938 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3942 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3944 /* dpaa2_sec_uninit(crypto_dev_name); */
3949 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3950 struct rte_dpaa2_device *dpaa2_dev)
3952 struct rte_cryptodev *cryptodev;
3953 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3957 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3958 dpaa2_dev->object_id);
3960 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3961 if (cryptodev == NULL)
3964 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3965 cryptodev->data->dev_private = rte_zmalloc_socket(
3966 "cryptodev private structure",
3967 sizeof(struct dpaa2_sec_dev_private),
3968 RTE_CACHE_LINE_SIZE,
3971 if (cryptodev->data->dev_private == NULL)
3972 rte_panic("Cannot allocate memzone for private "
3976 dpaa2_dev->cryptodev = cryptodev;
3977 cryptodev->device = &dpaa2_dev->device;
3979 /* init user callbacks */
3980 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3982 if (dpaa2_svr_family == SVR_LX2160A)
3983 rta_set_sec_era(RTA_SEC_ERA_10);
3985 rta_set_sec_era(RTA_SEC_ERA_8);
3987 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
3989 /* Invoke PMD device initialization function */
3990 retval = dpaa2_sec_dev_init(cryptodev);
3994 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3995 rte_free(cryptodev->data->dev_private);
3997 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4003 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4005 struct rte_cryptodev *cryptodev;
4008 cryptodev = dpaa2_dev->cryptodev;
4009 if (cryptodev == NULL)
4012 ret = dpaa2_sec_uninit(cryptodev);
4016 return rte_cryptodev_pmd_destroy(cryptodev);
4019 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4020 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4021 .drv_type = DPAA2_CRYPTO,
4023 .name = "DPAA2 SEC PMD"
4025 .probe = cryptodev_dpaa2_sec_probe,
4026 .remove = cryptodev_dpaa2_sec_remove,
4029 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4031 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4032 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4033 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4034 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);