1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
40 #include <desc/algo.h>
42 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
43 * a pointer to the shared descriptor
45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
46 #define FSL_VENDOR_ID 0x1957
47 #define FSL_DEVICE_ID 0x410
48 #define FSL_SUBSYSTEM_SEC 1
49 #define FSL_MC_DPSECI_DEVID 3
52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
53 #define FLE_POOL_NUM_BUFS 32000
54 #define FLE_POOL_BUF_SIZE 256
55 #define FLE_POOL_CACHE_SIZE 512
56 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32))
57 #define SEC_FLC_DHR_OUTBOUND -114
58 #define SEC_FLC_DHR_INBOUND 0
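/*
 * Illustrative sketch (not part of the driver): the FLE pool that backs the
 * single-buffer paths below can be created with rte_mempool_create() from the
 * sizes above. The real pool is created per device during configuration; the
 * pool name and socket id here are placeholder assumptions.
 */
#if 0
	struct rte_mempool *fle_pool;

	fle_pool = rte_mempool_create("sec_fle_pool", FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE, FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL, rte_socket_id(), 0);
	if (fle_pool == NULL)
		DPAA2_SEC_ERR("FLE pool creation failed");
#endif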
60 static uint8_t cryptodev_driver_id;
62 #ifdef RTE_LIB_SECURITY
64 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
65 struct rte_crypto_op *op,
66 struct qbman_fd *fd, uint16_t bpid)
68 struct rte_crypto_sym_op *sym_op = op->sym;
69 struct ctxt_priv *priv = sess->ctxt;
70 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
71 struct sec_flow_context *flc;
72 struct rte_mbuf *mbuf;
73 uint32_t in_len = 0, out_len = 0;
80 /* first FLE entry used to store mbuf and session ctxt */
81 fle = (struct qbman_fle *)rte_malloc(NULL,
82 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
85 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
88 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
89 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
90 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
92 /* Save the shared descriptor */
93 flc = &priv->flc_desc[0].flc;
99 if (likely(bpid < MAX_BPID)) {
100 DPAA2_SET_FD_BPID(fd, bpid);
101 DPAA2_SET_FLE_BPID(op_fle, bpid);
102 DPAA2_SET_FLE_BPID(ip_fle, bpid);
104 DPAA2_SET_FD_IVP(fd);
105 DPAA2_SET_FLE_IVP(op_fle);
106 DPAA2_SET_FLE_IVP(ip_fle);
109 /* Configure FD as a FRAME LIST */
110 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
111 DPAA2_SET_FD_COMPOUND_FMT(fd);
112 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
114 /* Configure Output FLE with Scatter/Gather Entry */
115 DPAA2_SET_FLE_SG_EXT(op_fle);
116 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
118 /* Configure Output SGE for Encap/Decap */
119 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
120 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
123 sge->length = mbuf->data_len;
124 out_len += sge->length;
127 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
128 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
130 /* using buf_len for last buf - so that extra data can be added */
131 sge->length = mbuf->buf_len - mbuf->data_off;
132 out_len += sge->length;
134 DPAA2_SET_FLE_FIN(sge);
135 op_fle->length = out_len;
138 mbuf = sym_op->m_src;
140 /* Configure Input FLE with Scatter/Gather Entry */
141 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
142 DPAA2_SET_FLE_SG_EXT(ip_fle);
143 DPAA2_SET_FLE_FIN(ip_fle);
145 /* Configure input SGE for Encap/Decap */
146 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
147 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
148 sge->length = mbuf->data_len;
149 in_len += sge->length;
155 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
156 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
157 sge->length = mbuf->data_len;
158 in_len += sge->length;
161 ip_fle->length = in_len;
162 DPAA2_SET_FLE_FIN(sge);
164 /* In case of PDCP, the per-packet HFN is stored in the
165 * mbuf private area after sym_op.
167 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
168 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
169 sess->pdcp.hfn_ovd_offset);
170 /* enable HFN override */
171 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
172 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
173 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
175 DPAA2_SET_FD_LEN(fd, ip_fle->length);
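/*
 * Application-side sketch for the HFN override path above: when hfn_ovd is
 * enabled, the caller places the 32-bit HFN at hfn_ovd_offset bytes past the
 * crypto op, which is exactly where the driver reads it. The helper name is
 * hypothetical.
 */
#if 0
static inline void
app_set_pdcp_hfn(struct rte_crypto_op *op, uint32_t hfn_ovd_offset,
		uint32_t hfn)
{
	*(uint32_t *)((uint8_t *)op + hfn_ovd_offset) = hfn;
}
#endif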
181 build_proto_compound_fd(dpaa2_sec_session *sess,
182 struct rte_crypto_op *op,
183 struct qbman_fd *fd, uint16_t bpid)
185 struct rte_crypto_sym_op *sym_op = op->sym;
186 struct ctxt_priv *priv = sess->ctxt;
187 struct qbman_fle *fle, *ip_fle, *op_fle;
188 struct sec_flow_context *flc;
189 struct rte_mbuf *src_mbuf = sym_op->m_src;
190 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
196 /* Save the shared descriptor */
197 flc = &priv->flc_desc[0].flc;
199 /* we are using the first FLE entry to store Mbuf */
200 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
202 DPAA2_SEC_DP_ERR("Memory alloc failed");
205 memset(fle, 0, FLE_POOL_BUF_SIZE);
206 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
207 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
212 if (likely(bpid < MAX_BPID)) {
213 DPAA2_SET_FD_BPID(fd, bpid);
214 DPAA2_SET_FLE_BPID(op_fle, bpid);
215 DPAA2_SET_FLE_BPID(ip_fle, bpid);
217 DPAA2_SET_FD_IVP(fd);
218 DPAA2_SET_FLE_IVP(op_fle);
219 DPAA2_SET_FLE_IVP(ip_fle);
222 /* Configure FD as a FRAME LIST */
223 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
224 DPAA2_SET_FD_COMPOUND_FMT(fd);
225 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
227 /* Configure Output FLE with dst mbuf data */
228 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
229 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
230 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
232 /* Configure Input FLE with src mbuf data */
233 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
234 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
235 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
237 DPAA2_SET_FD_LEN(fd, ip_fle->length);
238 DPAA2_SET_FLE_FIN(ip_fle);
240 /* In case of PDCP, the per-packet HFN is stored in the
241 * mbuf private area after sym_op.
243 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
244 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
245 sess->pdcp.hfn_ovd_offset);
246 /* enable HFN override */
247 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
248 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
249 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
257 build_proto_fd(dpaa2_sec_session *sess,
258 struct rte_crypto_op *op,
259 struct qbman_fd *fd, uint16_t bpid)
261 struct rte_crypto_sym_op *sym_op = op->sym;
263 return build_proto_compound_fd(sess, op, fd, bpid);
265 struct ctxt_priv *priv = sess->ctxt;
266 struct sec_flow_context *flc;
267 struct rte_mbuf *mbuf = sym_op->m_src;
269 if (likely(bpid < MAX_BPID))
270 DPAA2_SET_FD_BPID(fd, bpid);
272 DPAA2_SET_FD_IVP(fd);
274 /* Save the shared descriptor */
275 flc = &priv->flc_desc[0].flc;
277 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
278 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
279 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
280 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
282 /* save physical address of mbuf */
283 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
284 mbuf->buf_iova = (size_t)op;
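/*
 * Note: the two stores above stash the op pointer in buf_iova and preserve
 * the original IOVA in aead.digest.phys_addr. The inverse is done on dequeue
 * in sec_simple_fd_to_mbuf():
 *
 *	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
 *	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 */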
291 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
292 struct rte_crypto_op *op,
293 struct qbman_fd *fd, __rte_unused uint16_t bpid)
295 struct rte_crypto_sym_op *sym_op = op->sym;
296 struct ctxt_priv *priv = sess->ctxt;
297 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
298 struct sec_flow_context *flc;
299 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
300 int icv_len = sess->digest_length;
302 struct rte_mbuf *mbuf;
303 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
307 mbuf = sym_op->m_dst;
309 mbuf = sym_op->m_src;
311 /* first FLE entry used to store mbuf and session ctxt */
312 fle = (struct qbman_fle *)rte_malloc(NULL,
313 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
314 RTE_CACHE_LINE_SIZE);
315 if (unlikely(!fle)) {
316 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
319 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
320 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
321 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
327 /* Save the shared descriptor */
328 flc = &priv->flc_desc[0].flc;
330 /* Configure FD as a FRAME LIST */
331 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
332 DPAA2_SET_FD_COMPOUND_FMT(fd);
333 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
335 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
336 "iv-len=%d data_off: 0x%x\n",
337 sym_op->aead.data.offset,
338 sym_op->aead.data.length,
341 sym_op->m_src->data_off);
343 /* Configure Output FLE with Scatter/Gather Entry */
344 DPAA2_SET_FLE_SG_EXT(op_fle);
345 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
348 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
350 op_fle->length = (sess->dir == DIR_ENC) ?
351 (sym_op->aead.data.length + icv_len) :
352 sym_op->aead.data.length;
354 /* Configure Output SGE for Encap/Decap */
355 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
356 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
357 sge->length = mbuf->data_len - sym_op->aead.data.offset;
363 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
364 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
365 sge->length = mbuf->data_len;
368 sge->length -= icv_len;
370 if (sess->dir == DIR_ENC) {
372 DPAA2_SET_FLE_ADDR(sge,
373 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
374 sge->length = icv_len;
376 DPAA2_SET_FLE_FIN(sge);
379 mbuf = sym_op->m_src;
381 /* Configure Input FLE with Scatter/Gather Entry */
382 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
383 DPAA2_SET_FLE_SG_EXT(ip_fle);
384 DPAA2_SET_FLE_FIN(ip_fle);
385 ip_fle->length = (sess->dir == DIR_ENC) ?
386 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
387 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
390 /* Configure Input SGE for Encap/Decap */
391 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
392 sge->length = sess->iv.length;
396 DPAA2_SET_FLE_ADDR(sge,
397 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
398 sge->length = auth_only_len;
402 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
403 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
405 sge->length = mbuf->data_len - sym_op->aead.data.offset;
411 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
412 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
413 sge->length = mbuf->data_len;
417 if (sess->dir == DIR_DEC) {
419 old_icv = (uint8_t *)(sge + 1);
420 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
421 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
422 sge->length = icv_len;
425 DPAA2_SET_FLE_FIN(sge);
427 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
428 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
430 DPAA2_SET_FD_LEN(fd, ip_fle->length);
436 build_authenc_gcm_fd(dpaa2_sec_session *sess,
437 struct rte_crypto_op *op,
438 struct qbman_fd *fd, uint16_t bpid)
440 struct rte_crypto_sym_op *sym_op = op->sym;
441 struct ctxt_priv *priv = sess->ctxt;
442 struct qbman_fle *fle, *sge;
443 struct sec_flow_context *flc;
444 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
445 int icv_len = sess->digest_length, retval;
447 struct rte_mbuf *dst;
448 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
456 /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
457 * Currently we do not know which FLE has the mbuf stored.
458 * So while retrieving we can go back 1 FLE from the FD ADDR
459 * to get the MBUF Addr from the previous FLE.
460 * We can have a better approach to use the inline Mbuf
462 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
464 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
467 memset(fle, 0, FLE_POOL_BUF_SIZE);
468 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
469 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
472 if (likely(bpid < MAX_BPID)) {
473 DPAA2_SET_FD_BPID(fd, bpid);
474 DPAA2_SET_FLE_BPID(fle, bpid);
475 DPAA2_SET_FLE_BPID(fle + 1, bpid);
476 DPAA2_SET_FLE_BPID(sge, bpid);
477 DPAA2_SET_FLE_BPID(sge + 1, bpid);
478 DPAA2_SET_FLE_BPID(sge + 2, bpid);
479 DPAA2_SET_FLE_BPID(sge + 3, bpid);
481 DPAA2_SET_FD_IVP(fd);
482 DPAA2_SET_FLE_IVP(fle);
483 DPAA2_SET_FLE_IVP((fle + 1));
484 DPAA2_SET_FLE_IVP(sge);
485 DPAA2_SET_FLE_IVP((sge + 1));
486 DPAA2_SET_FLE_IVP((sge + 2));
487 DPAA2_SET_FLE_IVP((sge + 3));
490 /* Save the shared descriptor */
491 flc = &priv->flc_desc[0].flc;
492 /* Configure FD as a FRAME LIST */
493 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
494 DPAA2_SET_FD_COMPOUND_FMT(fd);
495 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
497 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
498 "iv-len=%d data_off: 0x%x\n",
499 sym_op->aead.data.offset,
500 sym_op->aead.data.length,
503 sym_op->m_src->data_off);
505 /* Configure Output FLE with Scatter/Gather Entry */
506 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
508 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
509 fle->length = (sess->dir == DIR_ENC) ?
510 (sym_op->aead.data.length + icv_len) :
511 sym_op->aead.data.length;
513 DPAA2_SET_FLE_SG_EXT(fle);
515 /* Configure Output SGE for Encap/Decap */
516 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
517 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
518 sge->length = sym_op->aead.data.length;
520 if (sess->dir == DIR_ENC) {
522 DPAA2_SET_FLE_ADDR(sge,
523 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
524 sge->length = sess->digest_length;
526 DPAA2_SET_FLE_FIN(sge);
531 /* Configure Input FLE with Scatter/Gather Entry */
532 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
533 DPAA2_SET_FLE_SG_EXT(fle);
534 DPAA2_SET_FLE_FIN(fle);
535 fle->length = (sess->dir == DIR_ENC) ?
536 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
537 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
538 sess->digest_length);
540 /* Configure Input SGE for Encap/Decap */
541 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
542 sge->length = sess->iv.length;
545 DPAA2_SET_FLE_ADDR(sge,
546 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
547 sge->length = auth_only_len;
548 DPAA2_SET_FLE_BPID(sge, bpid);
552 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
553 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
554 sym_op->m_src->data_off);
555 sge->length = sym_op->aead.data.length;
556 if (sess->dir == DIR_DEC) {
558 old_icv = (uint8_t *)(sge + 1);
559 memcpy(old_icv, sym_op->aead.digest.data,
560 sess->digest_length);
561 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
562 sge->length = sess->digest_length;
564 DPAA2_SET_FLE_FIN(sge);
567 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
568 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
571 DPAA2_SET_FD_LEN(fd, fle->length);
576 build_authenc_sg_fd(dpaa2_sec_session *sess,
577 struct rte_crypto_op *op,
578 struct qbman_fd *fd, __rte_unused uint16_t bpid)
580 struct rte_crypto_sym_op *sym_op = op->sym;
581 struct ctxt_priv *priv = sess->ctxt;
582 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
583 struct sec_flow_context *flc;
584 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
585 sym_op->auth.data.offset;
586 uint16_t auth_tail_len = sym_op->auth.data.length -
587 sym_op->cipher.data.length - auth_hdr_len;
588 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
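/*
 * Worked example for the packing above (assumed offsets): auth over
 * [0, 64) and cipher over [16, 48) gives auth_hdr_len = 16 - 0 = 16,
 * auth_tail_len = 64 - 32 - 16 = 16, so
 * auth_only_len = (16 << 16) | 16 = 0x00100010.
 */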
589 int icv_len = sess->digest_length;
591 struct rte_mbuf *mbuf;
592 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
596 mbuf = sym_op->m_dst;
598 mbuf = sym_op->m_src;
600 /* first FLE entry used to store mbuf and session ctxt */
601 fle = (struct qbman_fle *)rte_malloc(NULL,
602 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
603 RTE_CACHE_LINE_SIZE);
604 if (unlikely(!fle)) {
605 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
608 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
609 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
610 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
616 /* Save the shared descriptor */
617 flc = &priv->flc_desc[0].flc;
619 /* Configure FD as a FRAME LIST */
620 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
621 DPAA2_SET_FD_COMPOUND_FMT(fd);
622 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
625 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
626 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
627 sym_op->auth.data.offset,
628 sym_op->auth.data.length,
630 sym_op->cipher.data.offset,
631 sym_op->cipher.data.length,
633 sym_op->m_src->data_off);
635 /* Configure Output FLE with Scatter/Gather Entry */
636 DPAA2_SET_FLE_SG_EXT(op_fle);
637 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
640 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
642 op_fle->length = (sess->dir == DIR_ENC) ?
643 (sym_op->cipher.data.length + icv_len) :
644 sym_op->cipher.data.length;
646 /* Configure Output SGE for Encap/Decap */
647 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
648 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
649 sge->length = mbuf->data_len - sym_op->auth.data.offset;
655 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
656 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
657 sge->length = mbuf->data_len;
660 sge->length -= icv_len;
662 if (sess->dir == DIR_ENC) {
664 DPAA2_SET_FLE_ADDR(sge,
665 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
666 sge->length = icv_len;
668 DPAA2_SET_FLE_FIN(sge);
671 mbuf = sym_op->m_src;
673 /* Configure Input FLE with Scatter/Gather Entry */
674 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
675 DPAA2_SET_FLE_SG_EXT(ip_fle);
676 DPAA2_SET_FLE_FIN(ip_fle);
677 ip_fle->length = (sess->dir == DIR_ENC) ?
678 (sym_op->auth.data.length + sess->iv.length) :
679 (sym_op->auth.data.length + sess->iv.length +
682 /* Configure Input SGE for Encap/Decap */
683 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
684 sge->length = sess->iv.length;
687 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
688 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
690 sge->length = mbuf->data_len - sym_op->auth.data.offset;
696 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
697 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
698 sge->length = mbuf->data_len;
701 sge->length -= icv_len;
703 if (sess->dir == DIR_DEC) {
705 old_icv = (uint8_t *)(sge + 1);
706 memcpy(old_icv, sym_op->auth.digest.data,
708 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
709 sge->length = icv_len;
712 DPAA2_SET_FLE_FIN(sge);
714 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
715 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
717 DPAA2_SET_FD_LEN(fd, ip_fle->length);
723 build_authenc_fd(dpaa2_sec_session *sess,
724 struct rte_crypto_op *op,
725 struct qbman_fd *fd, uint16_t bpid)
727 struct rte_crypto_sym_op *sym_op = op->sym;
728 struct ctxt_priv *priv = sess->ctxt;
729 struct qbman_fle *fle, *sge;
730 struct sec_flow_context *flc;
731 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
732 sym_op->auth.data.offset;
733 uint16_t auth_tail_len = sym_op->auth.data.length -
734 sym_op->cipher.data.length - auth_hdr_len;
735 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
737 int icv_len = sess->digest_length, retval;
739 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
741 struct rte_mbuf *dst;
748 /* we are using the first FLE entry to store Mbuf.
749 * Currently we do not know which FLE has the mbuf stored.
750 * So while retrieving we can go back 1 FLE from the FD ADDR
751 * to get the MBUF Addr from the previous FLE.
752 * We can have a better approach to use the inline Mbuf
754 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
756 DPAA2_SEC_ERR("Memory alloc failed for SGE");
759 memset(fle, 0, FLE_POOL_BUF_SIZE);
760 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
761 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
764 if (likely(bpid < MAX_BPID)) {
765 DPAA2_SET_FD_BPID(fd, bpid);
766 DPAA2_SET_FLE_BPID(fle, bpid);
767 DPAA2_SET_FLE_BPID(fle + 1, bpid);
768 DPAA2_SET_FLE_BPID(sge, bpid);
769 DPAA2_SET_FLE_BPID(sge + 1, bpid);
770 DPAA2_SET_FLE_BPID(sge + 2, bpid);
771 DPAA2_SET_FLE_BPID(sge + 3, bpid);
773 DPAA2_SET_FD_IVP(fd);
774 DPAA2_SET_FLE_IVP(fle);
775 DPAA2_SET_FLE_IVP((fle + 1));
776 DPAA2_SET_FLE_IVP(sge);
777 DPAA2_SET_FLE_IVP((sge + 1));
778 DPAA2_SET_FLE_IVP((sge + 2));
779 DPAA2_SET_FLE_IVP((sge + 3));
782 /* Save the shared descriptor */
783 flc = &priv->flc_desc[0].flc;
784 /* Configure FD as a FRAME LIST */
785 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
786 DPAA2_SET_FD_COMPOUND_FMT(fd);
787 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
790 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
791 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
792 sym_op->auth.data.offset,
793 sym_op->auth.data.length,
795 sym_op->cipher.data.offset,
796 sym_op->cipher.data.length,
798 sym_op->m_src->data_off);
800 /* Configure Output FLE with Scatter/Gather Entry */
801 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
803 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
804 fle->length = (sess->dir == DIR_ENC) ?
805 (sym_op->cipher.data.length + icv_len) :
806 sym_op->cipher.data.length;
808 DPAA2_SET_FLE_SG_EXT(fle);
810 /* Configure Output SGE for Encap/Decap */
811 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
812 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
814 sge->length = sym_op->cipher.data.length;
816 if (sess->dir == DIR_ENC) {
818 DPAA2_SET_FLE_ADDR(sge,
819 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
820 sge->length = sess->digest_length;
821 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
824 DPAA2_SET_FLE_FIN(sge);
829 /* Configure Input FLE with Scatter/Gather Entry */
830 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
831 DPAA2_SET_FLE_SG_EXT(fle);
832 DPAA2_SET_FLE_FIN(fle);
833 fle->length = (sess->dir == DIR_ENC) ?
834 (sym_op->auth.data.length + sess->iv.length) :
835 (sym_op->auth.data.length + sess->iv.length +
836 sess->digest_length);
838 /* Configure Input SGE for Encap/Decap */
839 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
840 sge->length = sess->iv.length;
843 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
844 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
845 sym_op->m_src->data_off);
846 sge->length = sym_op->auth.data.length;
847 if (sess->dir == DIR_DEC) {
849 old_icv = (uint8_t *)(sge + 1);
850 memcpy(old_icv, sym_op->auth.digest.data,
851 sess->digest_length);
852 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
853 sge->length = sess->digest_length;
854 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
855 sess->digest_length +
858 DPAA2_SET_FLE_FIN(sge);
860 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
861 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
866 static inline int build_auth_sg_fd(
867 dpaa2_sec_session *sess,
868 struct rte_crypto_op *op,
870 __rte_unused uint16_t bpid)
872 struct rte_crypto_sym_op *sym_op = op->sym;
873 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
874 struct sec_flow_context *flc;
875 struct ctxt_priv *priv = sess->ctxt;
876 int data_len, data_offset;
878 struct rte_mbuf *mbuf;
880 data_len = sym_op->auth.data.length;
881 data_offset = sym_op->auth.data.offset;
883 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
884 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
885 if ((data_len & 7) || (data_offset & 7)) {
886 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
890 data_len = data_len >> 3;
891 data_offset = data_offset >> 3;
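/*
 * For SNOW 3G (UIA2) and ZUC (EIA3) the crypto API expresses the auth
 * length/offset in bits; the checks above enforce byte alignment and the
 * shifts convert bits to bytes, e.g. 128 bits >> 3 = 16 bytes.
 */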
894 mbuf = sym_op->m_src;
895 fle = (struct qbman_fle *)rte_malloc(NULL,
896 FLE_SG_MEM_SIZE(mbuf->nb_segs),
897 RTE_CACHE_LINE_SIZE);
898 if (unlikely(!fle)) {
899 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
902 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
903 /* first FLE entry used to store mbuf and session ctxt */
904 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
905 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
910 flc = &priv->flc_desc[DESC_INITFINAL].flc;
912 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
913 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
914 DPAA2_SET_FD_COMPOUND_FMT(fd);
917 DPAA2_SET_FLE_ADDR(op_fle,
918 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
919 op_fle->length = sess->digest_length;
922 DPAA2_SET_FLE_SG_EXT(ip_fle);
923 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
924 ip_fle->length = data_len;
926 if (sess->iv.length) {
929 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
932 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
933 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
935 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
936 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
939 sge->length = sess->iv.length;
941 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
942 ip_fle->length += sge->length;
946 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
947 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
949 if (data_len <= (mbuf->data_len - data_offset)) {
950 sge->length = data_len;
953 sge->length = mbuf->data_len - data_offset;
955 /* remaining i/p segs */
956 while ((data_len = data_len - sge->length) &&
957 (mbuf = mbuf->next)) {
959 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
960 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
961 if (data_len > mbuf->data_len)
962 sge->length = mbuf->data_len;
964 sge->length = data_len;
968 if (sess->dir == DIR_DEC) {
969 /* Digest verification case */
971 old_digest = (uint8_t *)(sge + 1);
972 rte_memcpy(old_digest, sym_op->auth.digest.data,
973 sess->digest_length);
974 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
975 sge->length = sess->digest_length;
976 ip_fle->length += sess->digest_length;
978 DPAA2_SET_FLE_FIN(sge);
979 DPAA2_SET_FLE_FIN(ip_fle);
980 DPAA2_SET_FD_LEN(fd, ip_fle->length);
986 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
987 struct qbman_fd *fd, uint16_t bpid)
989 struct rte_crypto_sym_op *sym_op = op->sym;
990 struct qbman_fle *fle, *sge;
991 struct sec_flow_context *flc;
992 struct ctxt_priv *priv = sess->ctxt;
993 int data_len, data_offset;
997 data_len = sym_op->auth.data.length;
998 data_offset = sym_op->auth.data.offset;
1000 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1001 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1002 if ((data_len & 7) || (data_offset & 7)) {
1003 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1007 data_len = data_len >> 3;
1008 data_offset = data_offset >> 3;
1011 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1013 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1016 memset(fle, 0, FLE_POOL_BUF_SIZE);
1017 /* TODO we are using the first FLE entry to store Mbuf.
1018 * Currently we do not know which FLE has the mbuf stored.
1019 * So while retrieving we can go back 1 FLE from the FD ADDR
1020 * to get the MBUF Addr from the previous FLE.
1021 * We can have a better approach to use the inline Mbuf
1023 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1024 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1028 if (likely(bpid < MAX_BPID)) {
1029 DPAA2_SET_FD_BPID(fd, bpid);
1030 DPAA2_SET_FLE_BPID(fle, bpid);
1031 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1032 DPAA2_SET_FLE_BPID(sge, bpid);
1033 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1035 DPAA2_SET_FD_IVP(fd);
1036 DPAA2_SET_FLE_IVP(fle);
1037 DPAA2_SET_FLE_IVP((fle + 1));
1038 DPAA2_SET_FLE_IVP(sge);
1039 DPAA2_SET_FLE_IVP((sge + 1));
1042 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1043 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1044 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1045 DPAA2_SET_FD_COMPOUND_FMT(fd);
1047 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1048 fle->length = sess->digest_length;
1051 /* Setting input FLE */
1052 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1053 DPAA2_SET_FLE_SG_EXT(fle);
1054 fle->length = data_len;
1056 if (sess->iv.length) {
1059 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1062 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1063 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1065 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1066 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1069 sge->length = sess->iv.length;
1072 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1073 fle->length = fle->length + sge->length;
1077 /* Setting data to authenticate */
1078 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1079 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1080 sge->length = data_len;
1082 if (sess->dir == DIR_DEC) {
1084 old_digest = (uint8_t *)(sge + 1);
1085 rte_memcpy(old_digest, sym_op->auth.digest.data,
1086 sess->digest_length);
1087 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1088 sge->length = sess->digest_length;
1089 fle->length = fle->length + sess->digest_length;
1092 DPAA2_SET_FLE_FIN(sge);
1093 DPAA2_SET_FLE_FIN(fle);
1094 DPAA2_SET_FD_LEN(fd, fle->length);
1100 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1101 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1103 struct rte_crypto_sym_op *sym_op = op->sym;
1104 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1105 int data_len, data_offset;
1106 struct sec_flow_context *flc;
1107 struct ctxt_priv *priv = sess->ctxt;
1108 struct rte_mbuf *mbuf;
1109 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1112 data_len = sym_op->cipher.data.length;
1113 data_offset = sym_op->cipher.data.offset;
1115 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1116 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1117 if ((data_len & 7) || (data_offset & 7)) {
1118 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1122 data_len = data_len >> 3;
1123 data_offset = data_offset >> 3;
1127 mbuf = sym_op->m_dst;
1129 mbuf = sym_op->m_src;
1131 /* first FLE entry used to store mbuf and session ctxt */
1132 fle = (struct qbman_fle *)rte_malloc(NULL,
1133 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1134 RTE_CACHE_LINE_SIZE);
1136 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1139 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1140 /* first FLE entry used to store mbuf and session ctxt */
1141 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1142 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1148 flc = &priv->flc_desc[0].flc;
1151 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1152 " data_off: 0x%x\n",
1156 sym_op->m_src->data_off);
1159 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1160 op_fle->length = data_len;
1161 DPAA2_SET_FLE_SG_EXT(op_fle);
1164 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1165 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1166 sge->length = mbuf->data_len - data_offset;
1172 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1173 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1174 sge->length = mbuf->data_len;
1177 DPAA2_SET_FLE_FIN(sge);
1180 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1181 flc, fle, fle->addr_hi, fle->addr_lo,
1185 mbuf = sym_op->m_src;
1187 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1188 ip_fle->length = sess->iv.length + data_len;
1189 DPAA2_SET_FLE_SG_EXT(ip_fle);
1192 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1193 DPAA2_SET_FLE_OFFSET(sge, 0);
1194 sge->length = sess->iv.length;
1199 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1200 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1201 sge->length = mbuf->data_len - data_offset;
1207 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1208 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1209 sge->length = mbuf->data_len;
1212 DPAA2_SET_FLE_FIN(sge);
1213 DPAA2_SET_FLE_FIN(ip_fle);
1216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1217 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1218 DPAA2_SET_FD_COMPOUND_FMT(fd);
1219 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1222 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1223 " off =%d, len =%d\n",
1224 DPAA2_GET_FD_ADDR(fd),
1225 DPAA2_GET_FD_BPID(fd),
1226 rte_dpaa2_bpid_info[bpid].meta_data_size,
1227 DPAA2_GET_FD_OFFSET(fd),
1228 DPAA2_GET_FD_LEN(fd));
1233 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1234 struct qbman_fd *fd, uint16_t bpid)
1236 struct rte_crypto_sym_op *sym_op = op->sym;
1237 struct qbman_fle *fle, *sge;
1238 int retval, data_len, data_offset;
1239 struct sec_flow_context *flc;
1240 struct ctxt_priv *priv = sess->ctxt;
1241 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1243 struct rte_mbuf *dst;
1245 data_len = sym_op->cipher.data.length;
1246 data_offset = sym_op->cipher.data.offset;
1248 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1249 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1250 if ((data_len & 7) || (data_offset & 7)) {
1251 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1255 data_len = data_len >> 3;
1256 data_offset = data_offset >> 3;
1260 dst = sym_op->m_dst;
1262 dst = sym_op->m_src;
1264 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1266 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1269 memset(fle, 0, FLE_POOL_BUF_SIZE);
1270 /* TODO we are using the first FLE entry to store Mbuf.
1271 * Currently we do not know which FLE has the mbuf stored.
1272 * So while retrieving we can go back 1 FLE from the FD ADDR
1273 * to get the MBUF Addr from the previous FLE.
1274 * We can have a better approach to use the inline Mbuf
1276 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1277 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1281 if (likely(bpid < MAX_BPID)) {
1282 DPAA2_SET_FD_BPID(fd, bpid);
1283 DPAA2_SET_FLE_BPID(fle, bpid);
1284 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1285 DPAA2_SET_FLE_BPID(sge, bpid);
1286 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1288 DPAA2_SET_FD_IVP(fd);
1289 DPAA2_SET_FLE_IVP(fle);
1290 DPAA2_SET_FLE_IVP((fle + 1));
1291 DPAA2_SET_FLE_IVP(sge);
1292 DPAA2_SET_FLE_IVP((sge + 1));
1295 flc = &priv->flc_desc[0].flc;
1296 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1297 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1298 DPAA2_SET_FD_COMPOUND_FMT(fd);
1299 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1302 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1303 " data_off: 0x%x\n",
1307 sym_op->m_src->data_off);
1309 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1310 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1312 fle->length = data_len + sess->iv.length;
1315 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1316 flc, fle, fle->addr_hi, fle->addr_lo,
1321 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1322 fle->length = data_len + sess->iv.length;
1324 DPAA2_SET_FLE_SG_EXT(fle);
1326 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1327 sge->length = sess->iv.length;
1330 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1331 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1333 sge->length = data_len;
1334 DPAA2_SET_FLE_FIN(sge);
1335 DPAA2_SET_FLE_FIN(fle);
1338 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1339 " off =%d, len =%d\n",
1340 DPAA2_GET_FD_ADDR(fd),
1341 DPAA2_GET_FD_BPID(fd),
1342 rte_dpaa2_bpid_info[bpid].meta_data_size,
1343 DPAA2_GET_FD_OFFSET(fd),
1344 DPAA2_GET_FD_LEN(fd));
1350 build_sec_fd(struct rte_crypto_op *op,
1351 struct qbman_fd *fd, uint16_t bpid)
1354 dpaa2_sec_session *sess;
1356 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1357 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1358 op->sym->session, cryptodev_driver_id);
1359 #ifdef RTE_LIB_SECURITY
1360 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1361 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1362 op->sym->sec_session);
1370 /* Any of the buffers is segmented */
1371 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1372 ((op->sym->m_dst != NULL) &&
1373 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1374 switch (sess->ctxt_type) {
1375 case DPAA2_SEC_CIPHER:
1376 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1378 case DPAA2_SEC_AUTH:
1379 ret = build_auth_sg_fd(sess, op, fd, bpid);
1381 case DPAA2_SEC_AEAD:
1382 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1384 case DPAA2_SEC_CIPHER_HASH:
1385 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1387 #ifdef RTE_LIB_SECURITY
1388 case DPAA2_SEC_IPSEC:
1389 case DPAA2_SEC_PDCP:
1390 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1393 case DPAA2_SEC_HASH_CIPHER:
1395 DPAA2_SEC_ERR("error: Unsupported session");
1398 switch (sess->ctxt_type) {
1399 case DPAA2_SEC_CIPHER:
1400 ret = build_cipher_fd(sess, op, fd, bpid);
1402 case DPAA2_SEC_AUTH:
1403 ret = build_auth_fd(sess, op, fd, bpid);
1405 case DPAA2_SEC_AEAD:
1406 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1408 case DPAA2_SEC_CIPHER_HASH:
1409 ret = build_authenc_fd(sess, op, fd, bpid);
1411 #ifdef RTE_LIB_SECURITY
1412 case DPAA2_SEC_IPSEC:
1413 ret = build_proto_fd(sess, op, fd, bpid);
1415 case DPAA2_SEC_PDCP:
1416 ret = build_proto_compound_fd(sess, op, fd, bpid);
1419 case DPAA2_SEC_HASH_CIPHER:
1421 DPAA2_SEC_ERR("error: Unsupported session");
1429 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1432 /* Function to transmit the frames to the given device and VQ */
1435 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1436 uint32_t frames_to_send, retry_count;
1437 struct qbman_eq_desc eqdesc;
1438 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1439 struct qbman_swp *swp;
1440 uint16_t num_tx = 0;
1441 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1442 /* TODO - need to support multiple buffer pools */
1444 struct rte_mempool *mb_pool;
1446 if (unlikely(nb_ops == 0))
1449 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1450 DPAA2_SEC_ERR("sessionless crypto op not supported");
1453 /*Prepare enqueue descriptor*/
1454 qbman_eq_desc_clear(&eqdesc);
1455 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1456 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1457 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1459 if (!DPAA2_PER_LCORE_DPIO) {
1460 ret = dpaa2_affine_qbman_swp();
1463 "Failed to allocate IO portal, tid: %d\n",
1468 swp = DPAA2_PER_LCORE_PORTAL;
1471 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1472 dpaa2_eqcr_size : nb_ops;
1474 for (loop = 0; loop < frames_to_send; loop++) {
1475 if (*dpaa2_seqn((*ops)->sym->m_src)) {
1476 uint8_t dqrr_index =
1477 *dpaa2_seqn((*ops)->sym->m_src) - 1;
1479 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1480 DPAA2_PER_LCORE_DQRR_SIZE--;
1481 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1482 *dpaa2_seqn((*ops)->sym->m_src) =
1483 DPAA2_INVALID_MBUF_SEQN;
1486 /*Clear the unused FD fields before sending*/
1487 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1488 mb_pool = (*ops)->sym->m_src->pool;
1489 bpid = mempool_to_bpid(mb_pool);
1490 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1492 DPAA2_SEC_ERR("error: Improper packet contents"
1493 " for crypto operation");
1501 while (loop < frames_to_send) {
1502 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1505 frames_to_send - loop);
1506 if (unlikely(ret < 0)) {
1508 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1523 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1524 dpaa2_qp->tx_vq.err_pkts += nb_ops;
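/*
 * Illustrative application-side usage of this burst API (sketch; dev_id,
 * qp_id and the populated ops[] array are assumptions):
 */
#if 0
	uint16_t sent, recvd = 0;

	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	while (recvd < sent)
		recvd += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				ops + recvd, sent - recvd);
#endif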
1528 #ifdef RTE_LIB_SECURITY
1529 static inline struct rte_crypto_op *
1530 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1532 struct rte_crypto_op *op;
1533 uint16_t len = DPAA2_GET_FD_LEN(fd);
1535 dpaa2_sec_session *sess_priv __rte_unused;
1537 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1538 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1539 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1541 diff = len - mbuf->pkt_len;
1542 mbuf->pkt_len += diff;
1543 mbuf->data_len += diff;
1544 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1545 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1546 op->sym->aead.digest.phys_addr = 0L;
1548 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1549 op->sym->sec_session);
1550 if (sess_priv->dir == DIR_ENC)
1551 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1553 mbuf->data_off += SEC_FLC_DHR_INBOUND;
1559 static inline struct rte_crypto_op *
1560 sec_fd_to_mbuf(const struct qbman_fd *fd)
1562 struct qbman_fle *fle;
1563 struct rte_crypto_op *op;
1564 struct ctxt_priv *priv;
1565 struct rte_mbuf *dst, *src;
1567 #ifdef RTE_LIB_SECURITY
1568 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1569 return sec_simple_fd_to_mbuf(fd);
1571 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1573 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1574 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1576 /* we are using the first FLE entry to store Mbuf.
1577 * Currently we do not know which FLE has the mbuf stored.
1578 * So while retrieving we can go back 1 FLE from the FD ADDR
1579 * to get the MBUF Addr from the previous FLE.
1580 * We can have a better approach to use the inline Mbuf
1583 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1584 /* TODO complete it. */
1585 DPAA2_SEC_ERR("error: non inline buffer");
1588 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1591 src = op->sym->m_src;
1594 if (op->sym->m_dst) {
1595 dst = op->sym->m_dst;
1600 #ifdef RTE_LIB_SECURITY
1601 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1602 uint16_t len = DPAA2_GET_FD_LEN(fd);
1604 while (dst->next != NULL) {
1605 len -= dst->data_len;
1608 dst->data_len = len;
1611 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1612 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1615 DPAA2_GET_FD_ADDR(fd),
1616 DPAA2_GET_FD_BPID(fd),
1617 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1618 DPAA2_GET_FD_OFFSET(fd),
1619 DPAA2_GET_FD_LEN(fd));
1621 /* free the fle memory */
1622 if (likely(rte_pktmbuf_is_contiguous(src))) {
1623 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1624 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1626 rte_free((void *)(fle-1));
1632 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1635 /* Function is responsible for receiving frames for a given device and VQ */
1636 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1637 struct qbman_result *dq_storage;
1638 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1639 int ret, num_rx = 0;
1640 uint8_t is_last = 0, status;
1641 struct qbman_swp *swp;
1642 const struct qbman_fd *fd;
1643 struct qbman_pull_desc pulldesc;
1645 if (!DPAA2_PER_LCORE_DPIO) {
1646 ret = dpaa2_affine_qbman_swp();
1649 "Failed to allocate IO portal, tid: %d\n",
1654 swp = DPAA2_PER_LCORE_PORTAL;
1655 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1657 qbman_pull_desc_clear(&pulldesc);
1658 qbman_pull_desc_set_numframes(&pulldesc,
1659 (nb_ops > dpaa2_dqrr_size) ?
1660 dpaa2_dqrr_size : nb_ops);
1661 qbman_pull_desc_set_fq(&pulldesc, fqid);
1662 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1663 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1666 /*Issue a volatile dequeue command. */
1668 if (qbman_swp_pull(swp, &pulldesc)) {
1670 "SEC VDQ command is not issued : QBMAN busy");
1671 /* Portal was busy, try again */
1677 /* Receive the packets till the last dequeue entry is found with
1678 * respect to the above issued PULL command.
1681 /* Check if the previously issued command is completed.
1682 * Also, the SWP seems to be shared between the Ethernet driver
1683 * and the SEC driver.
1685 while (!qbman_check_command_complete(dq_storage))
1688 /* Loop until the dq_storage is updated with
1689 * new token by QBMAN
1691 while (!qbman_check_new_result(dq_storage))
1693 /* Check whether the last pull command is expired and
1694 * set the condition for loop termination
1696 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1698 /* Check for valid frame. */
1699 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1701 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1702 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1707 fd = qbman_result_DQ_fd(dq_storage);
1708 ops[num_rx] = sec_fd_to_mbuf(fd);
1710 if (unlikely(fd->simple.frc)) {
1711 /* TODO Parse SEC errors */
1712 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
1714 dpaa2_qp->rx_vq.err_pkts += 1;
1715 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1717 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1722 } /* End of Packet Rx loop */
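/*
 * Note for callers (sketch): each dequeued op should still be checked,
 * e.g.
 *
 *	if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
 *		... handle the failed op ...
 *
 * since SEC errors are reported per frame via the FRC word checked above.
 */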
1724 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1726 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
1727 dpaa2_qp->rx_vq.err_pkts);
1728 /*Return the total number of packets received to DPAA2 app*/
1732 /** Release queue pair */
1734 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1736 struct dpaa2_sec_qp *qp =
1737 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1739 PMD_INIT_FUNC_TRACE();
1741 if (qp->rx_vq.q_storage) {
1742 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1743 rte_free(qp->rx_vq.q_storage);
1747 dev->data->queue_pairs[queue_pair_id] = NULL;
1752 /** Setup a queue pair */
1754 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1755 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1756 __rte_unused int socket_id)
1758 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1759 struct dpaa2_sec_qp *qp;
1760 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1761 struct dpseci_rx_queue_cfg cfg;
1764 PMD_INIT_FUNC_TRACE();
1766 /* If qp is already in use, free ring memory and qp metadata. */
1767 if (dev->data->queue_pairs[qp_id] != NULL) {
1768 DPAA2_SEC_INFO("QP already setup");
1772 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1773 dev, qp_id, qp_conf);
1775 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1777 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1778 RTE_CACHE_LINE_SIZE);
1780 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1784 qp->rx_vq.crypto_data = dev->data;
1785 qp->tx_vq.crypto_data = dev->data;
1786 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1787 sizeof(struct queue_storage_info_t),
1788 RTE_CACHE_LINE_SIZE);
1789 if (!qp->rx_vq.q_storage) {
1790 DPAA2_SEC_ERR("malloc failed for q_storage");
1793 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1795 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1796 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1800 dev->data->queue_pairs[qp_id] = qp;
1802 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1803 cfg.user_ctx = (size_t)(&qp->rx_vq);
1804 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1809 /** Returns the size of the dpaa2_sec session structure */
1811 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1813 PMD_INIT_FUNC_TRACE();
1815 return sizeof(dpaa2_sec_session);
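/*
 * Illustrative application-side path to the two entry points above
 * (sketch; dev_id, the descriptor count and the session mempools are
 * assumptions):
 */
#if 0
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = session_pool,
		.mp_session_private = session_priv_pool,
	};

	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
#endif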
1819 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1820 struct rte_crypto_sym_xform *xform,
1821 dpaa2_sec_session *session)
1823 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1824 struct alginfo cipherdata;
1825 int bufsize, ret = 0;
1826 struct ctxt_priv *priv;
1827 struct sec_flow_context *flc;
1829 PMD_INIT_FUNC_TRACE();
1831 /* For SEC CIPHER only one descriptor is required. */
1832 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1833 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1834 RTE_CACHE_LINE_SIZE);
1836 DPAA2_SEC_ERR("No Memory for priv CTXT");
1840 priv->fle_pool = dev_priv->fle_pool;
1842 flc = &priv->flc_desc[0].flc;
1844 session->ctxt_type = DPAA2_SEC_CIPHER;
1845 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1846 RTE_CACHE_LINE_SIZE);
1847 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1848 DPAA2_SEC_ERR("No Memory for cipher key");
1852 session->cipher_key.length = xform->cipher.key.length;
1854 memcpy(session->cipher_key.data, xform->cipher.key.data,
1855 xform->cipher.key.length);
1856 cipherdata.key = (size_t)session->cipher_key.data;
1857 cipherdata.keylen = session->cipher_key.length;
1858 cipherdata.key_enc_flags = 0;
1859 cipherdata.key_type = RTA_DATA_IMM;
1861 /* Set IV parameters */
1862 session->iv.offset = xform->cipher.iv.offset;
1863 session->iv.length = xform->cipher.iv.length;
1864 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1867 switch (xform->cipher.algo) {
1868 case RTE_CRYPTO_CIPHER_AES_CBC:
1869 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1870 cipherdata.algmode = OP_ALG_AAI_CBC;
1871 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1872 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1873 SHR_NEVER, &cipherdata,
1877 case RTE_CRYPTO_CIPHER_3DES_CBC:
1878 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1879 cipherdata.algmode = OP_ALG_AAI_CBC;
1880 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1881 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1882 SHR_NEVER, &cipherdata,
1886 case RTE_CRYPTO_CIPHER_DES_CBC:
1887 cipherdata.algtype = OP_ALG_ALGSEL_DES;
1888 cipherdata.algmode = OP_ALG_AAI_CBC;
1889 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1890 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1891 SHR_NEVER, &cipherdata,
1895 case RTE_CRYPTO_CIPHER_AES_CTR:
1896 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1897 cipherdata.algmode = OP_ALG_AAI_CTR;
1898 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1899 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1900 SHR_NEVER, &cipherdata,
1904 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1905 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1906 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1907 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1911 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1912 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1913 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1914 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1918 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1919 case RTE_CRYPTO_CIPHER_AES_F8:
1920 case RTE_CRYPTO_CIPHER_AES_ECB:
1921 case RTE_CRYPTO_CIPHER_3DES_ECB:
1922 case RTE_CRYPTO_CIPHER_3DES_CTR:
1923 case RTE_CRYPTO_CIPHER_AES_XTS:
1924 case RTE_CRYPTO_CIPHER_ARC4:
1925 case RTE_CRYPTO_CIPHER_NULL:
1926 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1927 xform->cipher.algo);
1931 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1932 xform->cipher.algo);
1938 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1943 flc->word1_sdl = (uint8_t)bufsize;
1944 session->ctxt = priv;
1946 #ifdef CAAM_DESC_DEBUG
1948 for (i = 0; i < bufsize; i++)
1949 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1954 rte_free(session->cipher_key.data);
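/*
 * Illustrative application-side xform feeding this init path (sketch;
 * the AES-CBC pick, the key buffer and IV_OFFSET are assumptions):
 */
#if 0
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};
#endif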
1960 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1961 struct rte_crypto_sym_xform *xform,
1962 dpaa2_sec_session *session)
1964 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1965 struct alginfo authdata;
1966 int bufsize, ret = 0;
1967 struct ctxt_priv *priv;
1968 struct sec_flow_context *flc;
1970 PMD_INIT_FUNC_TRACE();
1972 /* For SEC AUTH three descriptors are required for various stages */
1973 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1974 sizeof(struct ctxt_priv) + 3 *
1975 sizeof(struct sec_flc_desc),
1976 RTE_CACHE_LINE_SIZE);
1978 DPAA2_SEC_ERR("No Memory for priv CTXT");
1982 priv->fle_pool = dev_priv->fle_pool;
1983 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1985 session->ctxt_type = DPAA2_SEC_AUTH;
1986 session->auth_key.length = xform->auth.key.length;
1987 if (xform->auth.key.length) {
1988 session->auth_key.data = rte_zmalloc(NULL,
1989 xform->auth.key.length,
1990 RTE_CACHE_LINE_SIZE);
1991 if (session->auth_key.data == NULL) {
1992 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1996 memcpy(session->auth_key.data, xform->auth.key.data,
1997 xform->auth.key.length);
1998 authdata.key = (size_t)session->auth_key.data;
1999 authdata.key_enc_flags = 0;
2000 authdata.key_type = RTA_DATA_IMM;
2002 authdata.keylen = session->auth_key.length;
2004 session->digest_length = xform->auth.digest_length;
2005 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2008 switch (xform->auth.algo) {
2009 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2010 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2011 authdata.algmode = OP_ALG_AAI_HMAC;
2012 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2013 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2014 1, 0, SHR_NEVER, &authdata,
2016 session->digest_length);
2018 case RTE_CRYPTO_AUTH_MD5_HMAC:
2019 authdata.algtype = OP_ALG_ALGSEL_MD5;
2020 authdata.algmode = OP_ALG_AAI_HMAC;
2021 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2022 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2023 1, 0, SHR_NEVER, &authdata,
2025 session->digest_length);
2027 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2028 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2029 authdata.algmode = OP_ALG_AAI_HMAC;
2030 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2031 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2032 1, 0, SHR_NEVER, &authdata,
2034 session->digest_length);
2036 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2037 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2038 authdata.algmode = OP_ALG_AAI_HMAC;
2039 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2040 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2041 1, 0, SHR_NEVER, &authdata,
2043 session->digest_length);
2045 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2046 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2047 authdata.algmode = OP_ALG_AAI_HMAC;
2048 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2049 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2050 1, 0, SHR_NEVER, &authdata,
2052 session->digest_length);
2054 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2055 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2056 authdata.algmode = OP_ALG_AAI_HMAC;
2057 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2058 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2059 1, 0, SHR_NEVER, &authdata,
2061 session->digest_length);
2063 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2064 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2065 authdata.algmode = OP_ALG_AAI_F9;
2066 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2067 session->iv.offset = xform->auth.iv.offset;
2068 session->iv.length = xform->auth.iv.length;
2069 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2072 session->digest_length);
2074 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2075 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2076 authdata.algmode = OP_ALG_AAI_F9;
2077 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2078 session->iv.offset = xform->auth.iv.offset;
2079 session->iv.length = xform->auth.iv.length;
2080 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2083 session->digest_length);
2085 case RTE_CRYPTO_AUTH_SHA1:
2086 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2087 authdata.algmode = OP_ALG_AAI_HASH;
2088 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2089 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2090 1, 0, SHR_NEVER, &authdata,
2092 session->digest_length);
2094 case RTE_CRYPTO_AUTH_MD5:
2095 authdata.algtype = OP_ALG_ALGSEL_MD5;
2096 authdata.algmode = OP_ALG_AAI_HASH;
2097 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2098 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2099 1, 0, SHR_NEVER, &authdata,
2101 session->digest_length);
2103 case RTE_CRYPTO_AUTH_SHA256:
2104 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2105 authdata.algmode = OP_ALG_AAI_HASH;
2106 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2107 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2108 1, 0, SHR_NEVER, &authdata,
2110 session->digest_length);
2112 case RTE_CRYPTO_AUTH_SHA384:
2113 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2114 authdata.algmode = OP_ALG_AAI_HASH;
2115 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2116 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2117 1, 0, SHR_NEVER, &authdata,
2119 session->digest_length);
2121 case RTE_CRYPTO_AUTH_SHA512:
2122 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2123 authdata.algmode = OP_ALG_AAI_HASH;
2124 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2125 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2126 1, 0, SHR_NEVER, &authdata,
2128 session->digest_length);
2130 case RTE_CRYPTO_AUTH_SHA224:
2131 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2132 authdata.algmode = OP_ALG_AAI_HASH;
2133 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2134 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2135 1, 0, SHR_NEVER, &authdata,
2137 session->digest_length);
2139 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2140 authdata.algtype = OP_ALG_ALGSEL_AES;
2141 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2142 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2143 bufsize = cnstr_shdsc_aes_mac(
2144 priv->flc_desc[DESC_INITFINAL].desc,
2145 1, 0, SHR_NEVER, &authdata,
2147 session->digest_length);
2149 case RTE_CRYPTO_AUTH_AES_CMAC:
2150 authdata.algtype = OP_ALG_ALGSEL_AES;
2151 authdata.algmode = OP_ALG_AAI_CMAC;
2152 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2153 bufsize = cnstr_shdsc_aes_mac(
2154 priv->flc_desc[DESC_INITFINAL].desc,
2155 1, 0, SHR_NEVER, &authdata,
2157 session->digest_length);
2159 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2160 case RTE_CRYPTO_AUTH_AES_GMAC:
2161 case RTE_CRYPTO_AUTH_KASUMI_F9:
2162 case RTE_CRYPTO_AUTH_NULL:
2163 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2168 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2175 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2180 flc->word1_sdl = (uint8_t)bufsize;
2181 session->ctxt = priv;
2182 #ifdef CAAM_DESC_DEBUG
2184 for (i = 0; i < bufsize; i++)
2185 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2186 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2192 rte_free(session->auth_key.data);
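/*
 * Illustrative sketch (not part of the driver, kept under #if 0): the
 * auth-only xform an application might hand to this PMD, which lands in
 * dpaa2_sec_auth_init() above. The helper name and key buffer are
 * hypothetical; only public rte_cryptodev symbols are used.
 */
#if 0
static void
example_sha256_hmac_xform(struct rte_crypto_sym_xform *xf,
			  uint8_t *key, uint16_t keylen)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xf->next = NULL;			/* auth-only chain */
	xf->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
	xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xf->auth.key.data = key;
	xf->auth.key.length = keylen;
	xf->auth.digest_length = 32;		/* full SHA-256 digest */
}
#endif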
2198 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2199 struct rte_crypto_sym_xform *xform,
2200 dpaa2_sec_session *session)
2202 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2203 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2204 struct alginfo aeaddata;
2206 struct ctxt_priv *priv;
2207 struct sec_flow_context *flc;
2208 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2211 PMD_INIT_FUNC_TRACE();
2213 /* Set IV parameters */
2214 session->iv.offset = aead_xform->iv.offset;
2215 session->iv.length = aead_xform->iv.length;
2216 session->ctxt_type = DPAA2_SEC_AEAD;
2218 /* For SEC AEAD only one descriptor is required */
2219 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2220 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2221 RTE_CACHE_LINE_SIZE);
2223 DPAA2_SEC_ERR("No Memory for priv CTXT");
2227 priv->fle_pool = dev_priv->fle_pool;
2228 flc = &priv->flc_desc[0].flc;
2230 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2231 RTE_CACHE_LINE_SIZE);
2232 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2233 DPAA2_SEC_ERR("No Memory for aead key");
2237 memcpy(session->aead_key.data, aead_xform->key.data,
2238 aead_xform->key.length);
2240 session->digest_length = aead_xform->digest_length;
2241 session->aead_key.length = aead_xform->key.length;
2242 ctxt->auth_only_len = aead_xform->aad_length;
2244 aeaddata.key = (size_t)session->aead_key.data;
2245 aeaddata.keylen = session->aead_key.length;
2246 aeaddata.key_enc_flags = 0;
2247 aeaddata.key_type = RTA_DATA_IMM;
2249 switch (aead_xform->algo) {
2250 case RTE_CRYPTO_AEAD_AES_GCM:
2251 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2252 aeaddata.algmode = OP_ALG_AAI_GCM;
2253 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2255 case RTE_CRYPTO_AEAD_AES_CCM:
2256 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2261 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2266 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2269 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2270 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2272 (unsigned int *)priv->flc_desc[0].desc,
2273 &priv->flc_desc[0].desc[1], 1);
2276 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2280 if (priv->flc_desc[0].desc[1] & 1) {
2281 aeaddata.key_type = RTA_DATA_IMM;
2283 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2284 aeaddata.key_type = RTA_DATA_PTR;
2286 priv->flc_desc[0].desc[0] = 0;
2287 priv->flc_desc[0].desc[1] = 0;
2289 if (session->dir == DIR_ENC)
2290 bufsize = cnstr_shdsc_gcm_encap(
2291 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2292 &aeaddata, session->iv.length,
2293 session->digest_length);
2295 bufsize = cnstr_shdsc_gcm_decap(
2296 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2297 &aeaddata, session->iv.length,
2298 session->digest_length);
2300 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2305 flc->word1_sdl = (uint8_t)bufsize;
2306 session->ctxt = priv;
2307 #ifdef CAAM_DESC_DEBUG
2309 for (i = 0; i < bufsize; i++)
2310 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2311 i, priv->flc_desc[0].desc[i]);
2316 rte_free(session->aead_key.data);
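/*
 * Illustrative sketch (hypothetical helper, kept under #if 0): an
 * AES-128-GCM AEAD xform as consumed by dpaa2_sec_aead_init(). The
 * iv.offset convention (IV carried after the crypto op) and the 12-byte
 * GCM nonce length are assumptions typical of DPDK applications.
 */
#if 0
static void
example_aes_gcm_xform(struct rte_crypto_sym_xform *xf, uint8_t *key)
{
	memset(xf, 0, sizeof(*xf));
	xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xf->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xf->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xf->aead.key.data = key;
	xf->aead.key.length = 16;	/* AES-128 */
	xf->aead.iv.offset = sizeof(struct rte_crypto_op) +
			     sizeof(struct rte_crypto_sym_op);
	xf->aead.iv.length = 12;	/* GCM nonce */
	xf->aead.digest_length = 16;	/* feeds session->digest_length */
	xf->aead.aad_length = 8;	/* feeds ctxt->auth_only_len */
}
#endif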
2323 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2324 struct rte_crypto_sym_xform *xform,
2325 dpaa2_sec_session *session)
2327 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2328 struct alginfo authdata, cipherdata;
2330 struct ctxt_priv *priv;
2331 struct sec_flow_context *flc;
2332 struct rte_crypto_cipher_xform *cipher_xform;
2333 struct rte_crypto_auth_xform *auth_xform;
2336 PMD_INIT_FUNC_TRACE();
2338 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2339 cipher_xform = &xform->cipher;
2340 auth_xform = &xform->next->auth;
2341 session->ctxt_type =
2342 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2343 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2345 cipher_xform = &xform->next->cipher;
2346 auth_xform = &xform->auth;
2347 session->ctxt_type =
2348 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2349 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2352 /* Set IV parameters */
2353 session->iv.offset = cipher_xform->iv.offset;
2354 session->iv.length = cipher_xform->iv.length;
2356 /* For SEC AEAD only one descriptor is required */
2357 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2358 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2359 RTE_CACHE_LINE_SIZE);
2361 DPAA2_SEC_ERR("No Memory for priv CTXT");
2365 priv->fle_pool = dev_priv->fle_pool;
2366 flc = &priv->flc_desc[0].flc;
2368 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2369 RTE_CACHE_LINE_SIZE);
2370 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2371 DPAA2_SEC_ERR("No Memory for cipher key");
2375 session->cipher_key.length = cipher_xform->key.length;
2376 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2377 RTE_CACHE_LINE_SIZE);
2378 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2379 DPAA2_SEC_ERR("No Memory for auth key");
2380 rte_free(session->cipher_key.data);
2384 session->auth_key.length = auth_xform->key.length;
2385 memcpy(session->cipher_key.data, cipher_xform->key.data,
2386 cipher_xform->key.length);
2387 memcpy(session->auth_key.data, auth_xform->key.data,
2388 auth_xform->key.length);
2390 authdata.key = (size_t)session->auth_key.data;
2391 authdata.keylen = session->auth_key.length;
2392 authdata.key_enc_flags = 0;
2393 authdata.key_type = RTA_DATA_IMM;
2395 session->digest_length = auth_xform->digest_length;
2397 switch (auth_xform->algo) {
2398 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2399 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2400 authdata.algmode = OP_ALG_AAI_HMAC;
2401 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2403 case RTE_CRYPTO_AUTH_MD5_HMAC:
2404 authdata.algtype = OP_ALG_ALGSEL_MD5;
2405 authdata.algmode = OP_ALG_AAI_HMAC;
2406 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2408 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2409 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2410 authdata.algmode = OP_ALG_AAI_HMAC;
2411 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2413 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2414 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2415 authdata.algmode = OP_ALG_AAI_HMAC;
2416 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2418 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2419 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2420 authdata.algmode = OP_ALG_AAI_HMAC;
2421 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2423 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2424 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2425 authdata.algmode = OP_ALG_AAI_HMAC;
2426 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2428 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2429 authdata.algtype = OP_ALG_ALGSEL_AES;
2430 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2431 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2433 case RTE_CRYPTO_AUTH_AES_CMAC:
2434 authdata.algtype = OP_ALG_ALGSEL_AES;
2435 authdata.algmode = OP_ALG_AAI_CMAC;
2436 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2438 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2439 case RTE_CRYPTO_AUTH_AES_GMAC:
2440 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2441 case RTE_CRYPTO_AUTH_NULL:
2442 case RTE_CRYPTO_AUTH_SHA1:
2443 case RTE_CRYPTO_AUTH_SHA256:
2444 case RTE_CRYPTO_AUTH_SHA512:
2445 case RTE_CRYPTO_AUTH_SHA224:
2446 case RTE_CRYPTO_AUTH_SHA384:
2447 case RTE_CRYPTO_AUTH_MD5:
2448 case RTE_CRYPTO_AUTH_KASUMI_F9:
2449 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2450 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2455 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2460 cipherdata.key = (size_t)session->cipher_key.data;
2461 cipherdata.keylen = session->cipher_key.length;
2462 cipherdata.key_enc_flags = 0;
2463 cipherdata.key_type = RTA_DATA_IMM;
2465 switch (cipher_xform->algo) {
2466 case RTE_CRYPTO_CIPHER_AES_CBC:
2467 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2468 cipherdata.algmode = OP_ALG_AAI_CBC;
2469 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2471 case RTE_CRYPTO_CIPHER_3DES_CBC:
2472 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2473 cipherdata.algmode = OP_ALG_AAI_CBC;
2474 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2476 case RTE_CRYPTO_CIPHER_DES_CBC:
2477 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2478 cipherdata.algmode = OP_ALG_AAI_CBC;
2479 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2481 case RTE_CRYPTO_CIPHER_AES_CTR:
2482 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2483 cipherdata.algmode = OP_ALG_AAI_CTR;
2484 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2486 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2487 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2488 case RTE_CRYPTO_CIPHER_NULL:
2489 case RTE_CRYPTO_CIPHER_3DES_ECB:
2490 case RTE_CRYPTO_CIPHER_3DES_CTR:
2491 case RTE_CRYPTO_CIPHER_AES_ECB:
2492 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2493 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2494 cipher_xform->algo);
2498 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2499 cipher_xform->algo);
2503 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2506 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2507 priv->flc_desc[0].desc[1] = authdata.keylen;
2508 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2510 (unsigned int *)priv->flc_desc[0].desc,
2511 &priv->flc_desc[0].desc[2], 2);
2514 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2518 if (priv->flc_desc[0].desc[2] & 1) {
2519 cipherdata.key_type = RTA_DATA_IMM;
2521 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2522 cipherdata.key_type = RTA_DATA_PTR;
2524 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2525 authdata.key_type = RTA_DATA_IMM;
2527 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2528 authdata.key_type = RTA_DATA_PTR;
2530 priv->flc_desc[0].desc[0] = 0;
2531 priv->flc_desc[0].desc[1] = 0;
2532 priv->flc_desc[0].desc[2] = 0;
2534 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2535 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2537 &cipherdata, &authdata,
2539 session->digest_length,
2542 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2547 DPAA2_SEC_ERR("Hash before cipher not supported");
2552 flc->word1_sdl = (uint8_t)bufsize;
2553 session->ctxt = priv;
2554 #ifdef CAAM_DESC_DEBUG
2556 for (i = 0; i < bufsize; i++)
2557 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2558 i, priv->flc_desc[0].desc[i]);
2564 rte_free(session->cipher_key.data);
2565 rte_free(session->auth_key.data);
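/*
 * Illustrative sketch (hypothetical, kept under #if 0): a cipher-then-
 * auth chain as dispatched to dpaa2_sec_aead_chain_init(). The xform
 * ordering together with auth_cipher_text selects DPAA2_SEC_CIPHER_HASH
 * vs DPAA2_SEC_HASH_CIPHER above.
 */
#if 0
static void
example_aes_cbc_sha1_chain(struct rte_crypto_sym_xform *cipher_xf,
			   struct rte_crypto_sym_xform *auth_xf,
			   uint8_t *ckey, uint8_t *akey)
{
	memset(cipher_xf, 0, sizeof(*cipher_xf));
	memset(auth_xf, 0, sizeof(*auth_xf));

	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->next = auth_xf;	/* encrypt, then authenticate */
	cipher_xf->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xf->cipher.key.data = ckey;
	cipher_xf->cipher.key.length = 16;
	cipher_xf->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				      sizeof(struct rte_crypto_sym_op);
	cipher_xf->cipher.iv.length = 16;	/* AES block size */

	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->next = NULL;
	auth_xf->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth_xf->auth.key.data = akey;
	auth_xf->auth.key.length = 20;
	auth_xf->auth.digest_length = 20;
}
#endif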
2571 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2572 struct rte_crypto_sym_xform *xform, void *sess)
2574 dpaa2_sec_session *session = sess;
2577 PMD_INIT_FUNC_TRACE();
2579 if (unlikely(sess == NULL)) {
2580 DPAA2_SEC_ERR("Invalid session struct");
2584 memset(session, 0, sizeof(dpaa2_sec_session));
2585 /* Default IV length = 0 */
2586 session->iv.length = 0;
2589 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2590 ret = dpaa2_sec_cipher_init(dev, xform, session);
2592 /* Authentication Only */
2593 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2594 xform->next == NULL) {
2595 ret = dpaa2_sec_auth_init(dev, xform, session);
2597 /* Cipher then Authenticate */
2598 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2599 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2600 session->ext_params.aead_ctxt.auth_cipher_text = true;
2601 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2602 ret = dpaa2_sec_auth_init(dev, xform, session);
2603 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2604 ret = dpaa2_sec_cipher_init(dev, xform, session);
2606 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2607 /* Authenticate then Cipher */
2608 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2609 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2610 session->ext_params.aead_ctxt.auth_cipher_text = false;
2611 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2612 ret = dpaa2_sec_cipher_init(dev, xform, session);
2613 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2614 ret = dpaa2_sec_auth_init(dev, xform, session);
2616 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2617 /* AEAD operation for AES-GCM kind of Algorithms */
2618 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2619 xform->next == NULL) {
2620 ret = dpaa2_sec_aead_init(dev, xform, session);
2623 DPAA2_SEC_ERR("Invalid crypto type");
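/*
 * Illustrative usage (hypothetical, kept under #if 0): the application
 * path that ends up in dpaa2_sec_set_session_parameters() through the
 * sym_session_configure op. Assumes the two-step session API (create +
 * per-device init) of this DPDK generation.
 */
#if 0
static struct rte_cryptodev_sym_session *
example_create_sym_session(uint8_t dev_id,
			   struct rte_crypto_sym_xform *xf,
			   struct rte_mempool *sess_mp,
			   struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, xf,
					   sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
#endif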
2630 #ifdef RTE_LIB_SECURITY
2632 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2633 dpaa2_sec_session *session,
2634 struct alginfo *aeaddata)
2636 PMD_INIT_FUNC_TRACE();
2638 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2639 RTE_CACHE_LINE_SIZE);
2640 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2641 DPAA2_SEC_ERR("No Memory for aead key");
2644 memcpy(session->aead_key.data, aead_xform->key.data,
2645 aead_xform->key.length);
2647 session->digest_length = aead_xform->digest_length;
2648 session->aead_key.length = aead_xform->key.length;
2650 aeaddata->key = (size_t)session->aead_key.data;
2651 aeaddata->keylen = session->aead_key.length;
2652 aeaddata->key_enc_flags = 0;
2653 aeaddata->key_type = RTA_DATA_IMM;
2655 switch (aead_xform->algo) {
2656 case RTE_CRYPTO_AEAD_AES_GCM:
2657 switch (session->digest_length) {
2659 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2662 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2665 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2668 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2669 session->digest_length);
2672 aeaddata->algmode = OP_ALG_AAI_GCM;
2673 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2675 case RTE_CRYPTO_AEAD_AES_CCM:
2676 switch (session->digest_length) {
2678 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2681 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2684 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2687 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2688 session->digest_length);
2691 aeaddata->algmode = OP_ALG_AAI_CCM;
2692 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2695 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2699 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2706 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2707 struct rte_crypto_auth_xform *auth_xform,
2708 dpaa2_sec_session *session,
2709 struct alginfo *cipherdata,
2710 struct alginfo *authdata)
2713 session->cipher_key.data = rte_zmalloc(NULL,
2714 cipher_xform->key.length,
2715 RTE_CACHE_LINE_SIZE);
2716 if (session->cipher_key.data == NULL &&
2717 cipher_xform->key.length > 0) {
2718 DPAA2_SEC_ERR("No Memory for cipher key");
2722 session->cipher_key.length = cipher_xform->key.length;
2723 memcpy(session->cipher_key.data, cipher_xform->key.data,
2724 cipher_xform->key.length);
2725 session->cipher_alg = cipher_xform->algo;
2727 session->cipher_key.data = NULL;
2728 session->cipher_key.length = 0;
2729 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2733 session->auth_key.data = rte_zmalloc(NULL,
2734 auth_xform->key.length,
2735 RTE_CACHE_LINE_SIZE);
2736 if (session->auth_key.data == NULL &&
2737 auth_xform->key.length > 0) {
2738 DPAA2_SEC_ERR("No Memory for auth key");
2741 session->auth_key.length = auth_xform->key.length;
2742 memcpy(session->auth_key.data, auth_xform->key.data,
2743 auth_xform->key.length);
2744 session->auth_alg = auth_xform->algo;
2745 session->digest_length = auth_xform->digest_length;
2747 session->auth_key.data = NULL;
2748 session->auth_key.length = 0;
2749 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2752 authdata->key = (size_t)session->auth_key.data;
2753 authdata->keylen = session->auth_key.length;
2754 authdata->key_enc_flags = 0;
2755 authdata->key_type = RTA_DATA_IMM;
2756 switch (session->auth_alg) {
2757 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2758 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2759 authdata->algmode = OP_ALG_AAI_HMAC;
2761 case RTE_CRYPTO_AUTH_MD5_HMAC:
2762 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2763 authdata->algmode = OP_ALG_AAI_HMAC;
2765 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2766 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2767 authdata->algmode = OP_ALG_AAI_HMAC;
2768 if (session->digest_length != 16)
2770 "+++Using sha256-hmac truncated len is non-standard,"
2771 "it will not work with lookaside proto");
2773 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2774 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2775 authdata->algmode = OP_ALG_AAI_HMAC;
2777 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2778 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2779 authdata->algmode = OP_ALG_AAI_HMAC;
2781 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2782 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2783 authdata->algmode = OP_ALG_AAI_XCBC_MAC;
2785 case RTE_CRYPTO_AUTH_AES_CMAC:
2786 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2787 authdata->algmode = OP_ALG_AAI_CMAC;
2789 case RTE_CRYPTO_AUTH_NULL:
2790 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2792 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2793 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2794 case RTE_CRYPTO_AUTH_SHA1:
2795 case RTE_CRYPTO_AUTH_SHA256:
2796 case RTE_CRYPTO_AUTH_SHA512:
2797 case RTE_CRYPTO_AUTH_SHA224:
2798 case RTE_CRYPTO_AUTH_SHA384:
2799 case RTE_CRYPTO_AUTH_MD5:
2800 case RTE_CRYPTO_AUTH_AES_GMAC:
2801 case RTE_CRYPTO_AUTH_KASUMI_F9:
2802 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2803 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2804 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2808 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2812 cipherdata->key = (size_t)session->cipher_key.data;
2813 cipherdata->keylen = session->cipher_key.length;
2814 cipherdata->key_enc_flags = 0;
2815 cipherdata->key_type = RTA_DATA_IMM;
2817 switch (session->cipher_alg) {
2818 case RTE_CRYPTO_CIPHER_AES_CBC:
2819 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2820 cipherdata->algmode = OP_ALG_AAI_CBC;
2822 case RTE_CRYPTO_CIPHER_3DES_CBC:
2823 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2824 cipherdata->algmode = OP_ALG_AAI_CBC;
2826 case RTE_CRYPTO_CIPHER_DES_CBC:
2827 cipherdata->algtype = OP_PCL_IPSEC_DES;
2828 cipherdata->algmode = OP_ALG_AAI_CBC;
2830 case RTE_CRYPTO_CIPHER_AES_CTR:
2831 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2832 cipherdata->algmode = OP_ALG_AAI_CTR;
2834 case RTE_CRYPTO_CIPHER_NULL:
2835 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2837 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2838 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2839 case RTE_CRYPTO_CIPHER_3DES_ECB:
2840 case RTE_CRYPTO_CIPHER_3DES_CTR:
2841 case RTE_CRYPTO_CIPHER_AES_ECB:
2842 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2843 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2844 session->cipher_alg);
2847 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2848 session->cipher_alg);
2856 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2857 struct rte_security_session_conf *conf,
2860 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2861 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2862 struct rte_crypto_auth_xform *auth_xform = NULL;
2863 struct rte_crypto_aead_xform *aead_xform = NULL;
2864 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2865 struct ctxt_priv *priv;
2866 struct alginfo authdata, cipherdata;
2868 struct sec_flow_context *flc;
2869 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2872 PMD_INIT_FUNC_TRACE();
2874 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2875 sizeof(struct ctxt_priv) +
2876 sizeof(struct sec_flc_desc),
2877 RTE_CACHE_LINE_SIZE);
2880 DPAA2_SEC_ERR("No memory for priv CTXT");
2884 priv->fle_pool = dev_priv->fle_pool;
2885 flc = &priv->flc_desc[0].flc;
2887 memset(session, 0, sizeof(dpaa2_sec_session));
2889 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2890 cipher_xform = &conf->crypto_xform->cipher;
2891 if (conf->crypto_xform->next)
2892 auth_xform = &conf->crypto_xform->next->auth;
2893 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2894 session, &cipherdata, &authdata);
2895 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2896 auth_xform = &conf->crypto_xform->auth;
2897 if (conf->crypto_xform->next)
2898 cipher_xform = &conf->crypto_xform->next->cipher;
2899 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2900 session, &cipherdata, &authdata);
2901 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2902 aead_xform = &conf->crypto_xform->aead;
2903 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2904 session, &cipherdata);
2905 authdata.keylen = 0;
2906 authdata.algtype = 0;
2908 DPAA2_SEC_ERR("XFORM not specified");
2913 DPAA2_SEC_ERR("Failed to process xform");
2917 session->ctxt_type = DPAA2_SEC_IPSEC;
2918 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2919 uint8_t *hdr = NULL;
2921 struct rte_ipv6_hdr ip6_hdr;
2922 struct ipsec_encap_pdb encap_pdb;
2924 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2925 /* For Sec Proto only one descriptor is required. */
2926 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2928 /* copy algo specific data to PDB */
2929 switch (cipherdata.algtype) {
2930 case OP_PCL_IPSEC_AES_CTR:
2931 encap_pdb.ctr.ctr_initial = 0x00000001;
2932 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2934 case OP_PCL_IPSEC_AES_GCM8:
2935 case OP_PCL_IPSEC_AES_GCM12:
2936 case OP_PCL_IPSEC_AES_GCM16:
2937 memcpy(encap_pdb.gcm.salt,
2938 (uint8_t *)&(ipsec_xform->salt), 4);
2942 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2943 PDBOPTS_ESP_OIHI_PDB_INL |
2945 PDBHMO_ESP_ENCAP_DTTL |
2947 if (ipsec_xform->options.esn)
2948 encap_pdb.options |= PDBOPTS_ESP_ESN;
2949 encap_pdb.spi = ipsec_xform->spi;
2950 session->dir = DIR_ENC;
2951 if (ipsec_xform->tunnel.type ==
2952 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2953 encap_pdb.ip_hdr_len = sizeof(struct ip);
2954 ip4_hdr.ip_v = IPVERSION;
2956 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2957 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2960 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2961 ip4_hdr.ip_p = IPPROTO_ESP;
2963 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2964 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2965 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2966 &ip4_hdr, sizeof(struct ip));
2967 hdr = (uint8_t *)&ip4_hdr;
2968 } else if (ipsec_xform->tunnel.type ==
2969 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2970 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2971 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2972 ((ipsec_xform->tunnel.ipv6.dscp <<
2973 RTE_IPV6_HDR_TC_SHIFT) &
2974 RTE_IPV6_HDR_TC_MASK) |
2975 ((ipsec_xform->tunnel.ipv6.flabel <<
2976 RTE_IPV6_HDR_FL_SHIFT) &
2977 RTE_IPV6_HDR_FL_MASK));
2978 /* Payload length will be updated by HW */
2979 ip6_hdr.payload_len = 0;
2980 ip6_hdr.hop_limits =
2981 ipsec_xform->tunnel.ipv6.hlimit;
2982 ip6_hdr.proto = (ipsec_xform->proto ==
2983 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2984 IPPROTO_ESP : IPPROTO_AH;
2985 memcpy(&ip6_hdr.src_addr,
2986 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2987 memcpy(&ip6_hdr.dst_addr,
2988 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2989 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2990 hdr = (uint8_t *)&ip6_hdr;
2993 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2994 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2995 SHR_WAIT : SHR_SERIAL, &encap_pdb,
2996 hdr, &cipherdata, &authdata);
2997 } else if (ipsec_xform->direction ==
2998 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2999 struct ipsec_decap_pdb decap_pdb;
3001 flc->dhr = SEC_FLC_DHR_INBOUND;
3002 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3003 /* copy algo specific data to PDB */
3004 switch (cipherdata.algtype) {
3005 case OP_PCL_IPSEC_AES_CTR:
3006 decap_pdb.ctr.ctr_initial = 0x00000001;
3007 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3009 case OP_PCL_IPSEC_AES_GCM8:
3010 case OP_PCL_IPSEC_AES_GCM12:
3011 case OP_PCL_IPSEC_AES_GCM16:
3012 memcpy(decap_pdb.gcm.salt,
3013 (uint8_t *)&(ipsec_xform->salt), 4);
3017 decap_pdb.options = (ipsec_xform->tunnel.type ==
3018 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3019 sizeof(struct ip) << 16 :
3020 sizeof(struct rte_ipv6_hdr) << 16;
3021 if (ipsec_xform->options.esn)
3022 decap_pdb.options |= PDBOPTS_ESP_ESN;
3024 if (ipsec_xform->replay_win_sz) {
3026 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3028 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3029 DPAA2_SEC_INFO("Max anti-replay window size = 128");
3039 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3042 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3045 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3048 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3051 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3055 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3058 session->dir = DIR_DEC;
3059 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3060 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3061 SHR_WAIT : SHR_SERIAL,
3062 &decap_pdb, &cipherdata, &authdata);
3067 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3071 flc->word1_sdl = (uint8_t)bufsize;
3073 /* Enable the stashing control bit */
3074 DPAA2_SET_FLC_RSC(flc);
3075 flc->word2_rflc_31_0 = lower_32_bits(
3076 (size_t)&(((struct dpaa2_sec_qp *)
3077 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3078 flc->word3_rflc_63_32 = upper_32_bits(
3079 (size_t)&(((struct dpaa2_sec_qp *)
3080 dev->data->queue_pairs[0])->rx_vq));
3082 /* Set EWS bit i.e. enable write-safe */
3083 DPAA2_SET_FLC_EWS(flc);
3084 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3085 DPAA2_SET_FLC_REUSE_BS(flc);
3086 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3087 DPAA2_SET_FLC_REUSE_FF(flc);
3089 session->ctxt = priv;
3093 rte_free(session->auth_key.data);
3094 rte_free(session->cipher_key.data);
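/*
 * Illustrative sketch (hypothetical, kept under #if 0): the kind of
 * rte_security_session_conf that reaches dpaa2_sec_set_ipsec_session()
 * for an egress ESP tunnel. Field values are placeholders; the layout
 * assumes the rte_security_ipsec_xform of this DPDK generation.
 */
#if 0
static void
example_ipsec_egress_conf(struct rte_security_session_conf *conf,
			  struct rte_crypto_sym_xform *crypto_xf)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.spi = 0x1234;
	conf->ipsec.salt = 0xdeadbeef;	/* used for CTR/GCM PDB salt */
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.tunnel.ipv4.ttl = 64;
	conf->crypto_xform = crypto_xf;	/* cipher+auth chain or AEAD */
}
#endif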
3100 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3101 struct rte_security_session_conf *conf,
3104 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3105 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3106 struct rte_crypto_auth_xform *auth_xform = NULL;
3107 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3108 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3109 struct ctxt_priv *priv;
3110 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3111 struct alginfo authdata, cipherdata;
3112 struct alginfo *p_authdata = NULL;
3114 struct sec_flow_context *flc;
3115 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3121 PMD_INIT_FUNC_TRACE();
3123 memset(session, 0, sizeof(dpaa2_sec_session));
3125 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3126 sizeof(struct ctxt_priv) +
3127 sizeof(struct sec_flc_desc),
3128 RTE_CACHE_LINE_SIZE);
3131 DPAA2_SEC_ERR("No memory for priv CTXT");
3135 priv->fle_pool = dev_priv->fle_pool;
3136 flc = &priv->flc_desc[0].flc;
3138 /* find xfrm types */
3139 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3140 cipher_xform = &xform->cipher;
3141 if (xform->next != NULL) {
3142 session->ext_params.aead_ctxt.auth_cipher_text = true;
3143 auth_xform = &xform->next->auth;
3145 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3146 auth_xform = &xform->auth;
3147 if (xform->next != NULL) {
3148 session->ext_params.aead_ctxt.auth_cipher_text = false;
3149 cipher_xform = &xform->next->cipher;
3152 DPAA2_SEC_ERR("Invalid crypto type");
3156 session->ctxt_type = DPAA2_SEC_PDCP;
3158 session->cipher_key.data = rte_zmalloc(NULL,
3159 cipher_xform->key.length,
3160 RTE_CACHE_LINE_SIZE);
3161 if (session->cipher_key.data == NULL &&
3162 cipher_xform->key.length > 0) {
3163 DPAA2_SEC_ERR("No Memory for cipher key");
3167 session->cipher_key.length = cipher_xform->key.length;
3168 memcpy(session->cipher_key.data, cipher_xform->key.data,
3169 cipher_xform->key.length);
3171 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3173 session->cipher_alg = cipher_xform->algo;
3175 session->cipher_key.data = NULL;
3176 session->cipher_key.length = 0;
3177 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3178 session->dir = DIR_ENC;
3181 session->pdcp.domain = pdcp_xform->domain;
3182 session->pdcp.bearer = pdcp_xform->bearer;
3183 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3184 session->pdcp.sn_size = pdcp_xform->sn_size;
3185 session->pdcp.hfn = pdcp_xform->hfn;
3186 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3187 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3188 /* hfn ovd offset location is stored in the iv.offset value */
3190 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3192 cipherdata.key = (size_t)session->cipher_key.data;
3193 cipherdata.keylen = session->cipher_key.length;
3194 cipherdata.key_enc_flags = 0;
3195 cipherdata.key_type = RTA_DATA_IMM;
3197 switch (session->cipher_alg) {
3198 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3199 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3201 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3202 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3204 case RTE_CRYPTO_CIPHER_AES_CTR:
3205 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3207 case RTE_CRYPTO_CIPHER_NULL:
3208 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3211 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3212 session->cipher_alg);
3217 session->auth_key.data = rte_zmalloc(NULL,
3218 auth_xform->key.length,
3219 RTE_CACHE_LINE_SIZE);
3220 if (!session->auth_key.data &&
3221 auth_xform->key.length > 0) {
3222 DPAA2_SEC_ERR("No Memory for auth key");
3223 rte_free(session->cipher_key.data);
3227 session->auth_key.length = auth_xform->key.length;
3228 memcpy(session->auth_key.data, auth_xform->key.data,
3229 auth_xform->key.length);
3230 session->auth_alg = auth_xform->algo;
3232 session->auth_key.data = NULL;
3233 session->auth_key.length = 0;
3234 session->auth_alg = 0;
3236 authdata.key = (size_t)session->auth_key.data;
3237 authdata.keylen = session->auth_key.length;
3238 authdata.key_enc_flags = 0;
3239 authdata.key_type = RTA_DATA_IMM;
3241 if (session->auth_alg) {
3242 switch (session->auth_alg) {
3243 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3244 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3246 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3247 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3249 case RTE_CRYPTO_AUTH_AES_CMAC:
3250 authdata.algtype = PDCP_AUTH_TYPE_AES;
3252 case RTE_CRYPTO_AUTH_NULL:
3253 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3256 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3261 p_authdata = &authdata;
3262 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3263 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3267 if (pdcp_xform->sdap_enabled) {
3268 int nb_keys_to_inline =
3269 rta_inline_pdcp_sdap_query(authdata.algtype,
3271 session->pdcp.sn_size,
3272 session->pdcp.hfn_ovd);
3273 if (nb_keys_to_inline >= 1) {
3274 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3275 cipherdata.key_type = RTA_DATA_PTR;
3277 if (nb_keys_to_inline >= 2) {
3278 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3279 authdata.key_type = RTA_DATA_PTR;
3282 if (rta_inline_pdcp_query(authdata.algtype,
3284 session->pdcp.sn_size,
3285 session->pdcp.hfn_ovd)) {
3286 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3287 cipherdata.key_type = RTA_DATA_PTR;
3291 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3292 if (session->dir == DIR_ENC)
3293 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3294 priv->flc_desc[0].desc, 1, swap,
3296 session->pdcp.sn_size,
3298 pdcp_xform->pkt_dir,
3299 pdcp_xform->hfn_threshold,
3300 &cipherdata, &authdata,
3302 else if (session->dir == DIR_DEC)
3303 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3304 priv->flc_desc[0].desc, 1, swap,
3306 session->pdcp.sn_size,
3308 pdcp_xform->pkt_dir,
3309 pdcp_xform->hfn_threshold,
3310 &cipherdata, &authdata,
3313 if (session->dir == DIR_ENC) {
3314 if (pdcp_xform->sdap_enabled)
3315 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3316 priv->flc_desc[0].desc, 1, swap,
3317 session->pdcp.sn_size,
3320 pdcp_xform->pkt_dir,
3321 pdcp_xform->hfn_threshold,
3322 &cipherdata, p_authdata, 0);
3324 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3325 priv->flc_desc[0].desc, 1, swap,
3326 session->pdcp.sn_size,
3329 pdcp_xform->pkt_dir,
3330 pdcp_xform->hfn_threshold,
3331 &cipherdata, p_authdata, 0);
3332 } else if (session->dir == DIR_DEC) {
3333 if (pdcp_xform->sdap_enabled)
3334 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3335 priv->flc_desc[0].desc, 1, swap,
3336 session->pdcp.sn_size,
3339 pdcp_xform->pkt_dir,
3340 pdcp_xform->hfn_threshold,
3341 &cipherdata, p_authdata, 0);
3343 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3344 priv->flc_desc[0].desc, 1, swap,
3345 session->pdcp.sn_size,
3348 pdcp_xform->pkt_dir,
3349 pdcp_xform->hfn_threshold,
3350 &cipherdata, p_authdata, 0);
3355 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3359 /* Enable the stashing control bit */
3360 DPAA2_SET_FLC_RSC(flc);
3361 flc->word2_rflc_31_0 = lower_32_bits(
3362 (size_t)&(((struct dpaa2_sec_qp *)
3363 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3364 flc->word3_rflc_63_32 = upper_32_bits(
3365 (size_t)&(((struct dpaa2_sec_qp *)
3366 dev->data->queue_pairs[0])->rx_vq));
3368 flc->word1_sdl = (uint8_t)bufsize;
3370 /* TODO - check the perf impact or
3371 * align as per descriptor type
3372 * Set EWS bit i.e. enable write-safe
3373 * DPAA2_SET_FLC_EWS(flc);
3376 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3377 DPAA2_SET_FLC_REUSE_BS(flc);
3378 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3379 DPAA2_SET_FLC_REUSE_FF(flc);
3381 session->ctxt = priv;
3385 rte_free(session->auth_key.data);
3386 rte_free(session->cipher_key.data);
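/*
 * Illustrative sketch (hypothetical, kept under #if 0): a control-plane
 * PDCP session conf as consumed by dpaa2_sec_set_pdcp_session(). For
 * RTE_SECURITY_PDCP_MODE_CONTROL an integrity xform is mandatory, per
 * the check above; field values are placeholders.
 */
#if 0
static void
example_pdcp_cplane_conf(struct rte_security_session_conf *conf,
			 struct rte_crypto_sym_xform *cipher_auth_chain)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_PDCP;
	conf->pdcp.domain = RTE_SECURITY_PDCP_MODE_CONTROL;
	conf->pdcp.bearer = 0x3;
	conf->pdcp.pkt_dir = RTE_SECURITY_PDCP_UPLINK;
	conf->pdcp.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5;
	conf->pdcp.hfn = 0x1;
	conf->pdcp.hfn_threshold = 0xfffff;
	conf->crypto_xform = cipher_auth_chain;
}
#endif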
3392 dpaa2_sec_security_session_create(void *dev,
3393 struct rte_security_session_conf *conf,
3394 struct rte_security_session *sess,
3395 struct rte_mempool *mempool)
3397 void *sess_private_data;
3398 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3401 if (rte_mempool_get(mempool, &sess_private_data)) {
3402 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3406 switch (conf->protocol) {
3407 case RTE_SECURITY_PROTOCOL_IPSEC:
3408 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3411 case RTE_SECURITY_PROTOCOL_MACSEC:
3413 case RTE_SECURITY_PROTOCOL_PDCP:
3414 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3421 DPAA2_SEC_ERR("Failed to configure session parameters");
3422 /* Return session to mempool */
3423 rte_mempool_put(mempool, sess_private_data);
3427 set_sec_session_private_data(sess, sess_private_data);
3432 /** Clear the memory of session so it doesn't leave key material behind */
3434 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3435 struct rte_security_session *sess)
3437 PMD_INIT_FUNC_TRACE();
3438 void *sess_priv = get_sec_session_private_data(sess);
3440 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3443 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3446 rte_free(s->cipher_key.data);
3447 rte_free(s->auth_key.data);
3448 memset(s, 0, sizeof(dpaa2_sec_session));
3449 set_sec_session_private_data(sess, NULL);
3450 rte_mempool_put(sess_mp, sess_priv);
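/*
 * Illustrative usage (hypothetical, kept under #if 0): the application
 * side of dpaa2_sec_security_session_create(). Assumes the
 * four-argument rte_security_session_create() of this DPDK generation;
 * earlier releases took a single mempool.
 */
#if 0
static struct rte_security_session *
example_create_sec_session(uint8_t dev_id,
			   struct rte_security_session_conf *conf,
			   struct rte_mempool *mp,
			   struct rte_mempool *priv_mp)
{
	struct rte_security_ctx *ctx =
		rte_cryptodev_get_sec_ctx(dev_id);

	if (ctx == NULL)
		return NULL;
	return rte_security_session_create(ctx, conf, mp, priv_mp);
}
#endif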
3456 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3457 struct rte_crypto_sym_xform *xform,
3458 struct rte_cryptodev_sym_session *sess,
3459 struct rte_mempool *mempool)
3461 void *sess_private_data;
3464 if (rte_mempool_get(mempool, &sess_private_data)) {
3465 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3469 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3471 DPAA2_SEC_ERR("Failed to configure session parameters");
3472 /* Return session to mempool */
3473 rte_mempool_put(mempool, sess_private_data);
3477 set_sym_session_private_data(sess, dev->driver_id,
3483 /** Clear the memory of session so it doesn't leave key material behind */
3485 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3486 struct rte_cryptodev_sym_session *sess)
3488 PMD_INIT_FUNC_TRACE();
3489 uint8_t index = dev->driver_id;
3490 void *sess_priv = get_sym_session_private_data(sess, index);
3491 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3495 rte_free(s->cipher_key.data);
3496 rte_free(s->auth_key.data);
3497 memset(s, 0, sizeof(dpaa2_sec_session));
3498 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3499 set_sym_session_private_data(sess, index, NULL);
3500 rte_mempool_put(sess_mp, sess_priv);
3505 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3506 struct rte_cryptodev_config *config __rte_unused)
3508 PMD_INIT_FUNC_TRACE();
3514 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3516 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3517 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3518 struct dpseci_attr attr;
3519 struct dpaa2_queue *dpaa2_q;
3520 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3521 dev->data->queue_pairs;
3522 struct dpseci_rx_queue_attr rx_attr;
3523 struct dpseci_tx_queue_attr tx_attr;
3526 PMD_INIT_FUNC_TRACE();
3528 memset(&attr, 0, sizeof(struct dpseci_attr));
3530 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3532 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3534 goto get_attr_failure;
3536 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3538 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3539 goto get_attr_failure;
3541 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3542 dpaa2_q = &qp[i]->rx_vq;
3543 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3545 dpaa2_q->fqid = rx_attr.fqid;
3546 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3548 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3549 dpaa2_q = &qp[i]->tx_vq;
3550 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3552 dpaa2_q->fqid = tx_attr.fqid;
3553 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3558 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3563 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3565 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3566 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3569 PMD_INIT_FUNC_TRACE();
3571 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3573 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3578 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3580 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3586 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3588 PMD_INIT_FUNC_TRACE();
3594 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3595 struct rte_cryptodev_info *info)
3597 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3599 PMD_INIT_FUNC_TRACE();
3601 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3602 info->feature_flags = dev->feature_flags;
3603 info->capabilities = dpaa2_sec_capabilities;
3604 /* No limit on the number of sessions */
3605 info->sym.max_nb_sessions = 0;
3606 info->driver_id = cryptodev_driver_id;
3611 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3612 struct rte_cryptodev_stats *stats)
3614 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3615 struct fsl_mc_io dpseci;
3616 struct dpseci_sec_counters counters = {0};
3617 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3618 dev->data->queue_pairs;
3621 PMD_INIT_FUNC_TRACE();
3622 if (stats == NULL) {
3623 DPAA2_SEC_ERR("Invalid stats pointer (NULL)");
3626 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3627 if (qp == NULL || qp[i] == NULL) {
3628 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3632 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3633 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3634 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3635 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3638 /* In case a secondary process accesses stats, the MCP portal in
3639 * priv->hw may hold the primary process address. Use the secondary
3640 * process based MCP portal address for this object.
3642 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3643 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3646 DPAA2_SEC_ERR("Reading SEC counters failed");
3648 DPAA2_SEC_INFO("dpseci hardware stats:"
3649 "\n\tNum of Requests Dequeued = %" PRIu64
3650 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3651 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3652 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3653 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3654 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3655 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3656 counters.dequeued_requests,
3657 counters.ob_enc_requests,
3658 counters.ib_dec_requests,
3659 counters.ob_enc_bytes,
3660 counters.ob_prot_bytes,
3661 counters.ib_dec_bytes,
3662 counters.ib_valid_bytes);
3667 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3670 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3671 (dev->data->queue_pairs);
3673 PMD_INIT_FUNC_TRACE();
3675 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3676 if (qp[i] == NULL) {
3677 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3680 qp[i]->tx_vq.rx_pkts = 0;
3681 qp[i]->tx_vq.tx_pkts = 0;
3682 qp[i]->tx_vq.err_pkts = 0;
3683 qp[i]->rx_vq.rx_pkts = 0;
3684 qp[i]->rx_vq.tx_pkts = 0;
3685 qp[i]->rx_vq.err_pkts = 0;
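/*
 * Illustrative usage (kept under #if 0): reading and clearing the
 * per-queue-pair counters maintained above through the generic
 * cryptodev stats API; only stable public symbols are used.
 */
#if 0
static void
example_dump_and_reset_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		DPAA2_SEC_INFO("enqueued %" PRIu64 ", dequeued %" PRIu64,
			       stats.enqueued_count, stats.dequeued_count);
	rte_cryptodev_stats_reset(dev_id);
}
#endif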
3689 static void __rte_hot
3690 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3691 const struct qbman_fd *fd,
3692 const struct qbman_result *dq,
3693 struct dpaa2_queue *rxq,
3694 struct rte_event *ev)
3696 /* Prefetching mbuf */
3697 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3698 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3700 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3701 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3703 ev->flow_id = rxq->ev.flow_id;
3704 ev->sub_event_type = rxq->ev.sub_event_type;
3705 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3706 ev->op = RTE_EVENT_OP_NEW;
3707 ev->sched_type = rxq->ev.sched_type;
3708 ev->queue_id = rxq->ev.queue_id;
3709 ev->priority = rxq->ev.priority;
3710 ev->event_ptr = sec_fd_to_mbuf(fd);
3712 qbman_swp_dqrr_consume(swp, dq);
3715 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3716 const struct qbman_fd *fd,
3717 const struct qbman_result *dq,
3718 struct dpaa2_queue *rxq,
3719 struct rte_event *ev)
3722 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3723 /* Prefetching mbuf */
3724 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3725 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3727 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3728 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3730 ev->flow_id = rxq->ev.flow_id;
3731 ev->sub_event_type = rxq->ev.sub_event_type;
3732 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3733 ev->op = RTE_EVENT_OP_NEW;
3734 ev->sched_type = rxq->ev.sched_type;
3735 ev->queue_id = rxq->ev.queue_id;
3736 ev->priority = rxq->ev.priority;
3738 ev->event_ptr = sec_fd_to_mbuf(fd);
3739 dqrr_index = qbman_get_dqrr_idx(dq);
3740 *dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
3741 DPAA2_PER_LCORE_DQRR_SIZE++;
3742 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3743 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3747 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3749 struct dpaa2_dpcon_dev *dpcon,
3750 const struct rte_event *event)
3752 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3754 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3755 struct dpseci_rx_queue_cfg cfg;
3759 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3760 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3761 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3762 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3766 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3767 (dpcon->num_priorities - 1);
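/*
 * Worked example (illustrative): with event->priority ==
 * RTE_EVENT_DEV_PRIORITY_NORMAL (128) and an 8-priority DPCON,
 * priority = (255 / 128) * (8 - 1) = 1 * 7 = 7, i.e. the lowest
 * DPCON priority. The integer division assumes a non-zero
 * event->priority.
 */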
3769 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3770 cfg.options = DPSECI_QUEUE_OPT_DEST;
3771 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3772 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3773 cfg.dest_cfg.priority = priority;
3775 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3776 cfg.user_ctx = (size_t)(qp);
3777 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3778 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3779 cfg.order_preservation_en = 1;
3781 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3784 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3788 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3794 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3797 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3798 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3799 struct dpseci_rx_queue_cfg cfg;
3802 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3803 cfg.options = DPSECI_QUEUE_OPT_DEST;
3804 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3806 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3809 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3814 static struct rte_cryptodev_ops crypto_ops = {
3815 .dev_configure = dpaa2_sec_dev_configure,
3816 .dev_start = dpaa2_sec_dev_start,
3817 .dev_stop = dpaa2_sec_dev_stop,
3818 .dev_close = dpaa2_sec_dev_close,
3819 .dev_infos_get = dpaa2_sec_dev_infos_get,
3820 .stats_get = dpaa2_sec_stats_get,
3821 .stats_reset = dpaa2_sec_stats_reset,
3822 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3823 .queue_pair_release = dpaa2_sec_queue_pair_release,
3824 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3825 .sym_session_configure = dpaa2_sec_sym_session_configure,
3826 .sym_session_clear = dpaa2_sec_sym_session_clear,
3829 #ifdef RTE_LIB_SECURITY
3830 static const struct rte_security_capability *
3831 dpaa2_sec_capabilities_get(void *device __rte_unused)
3833 return dpaa2_sec_security_cap;
3836 static const struct rte_security_ops dpaa2_sec_security_ops = {
3837 .session_create = dpaa2_sec_security_session_create,
3838 .session_update = NULL,
3839 .session_stats_get = NULL,
3840 .session_destroy = dpaa2_sec_security_session_destroy,
3841 .set_pkt_metadata = NULL,
3842 .capabilities_get = dpaa2_sec_capabilities_get
3847 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3849 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3850 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3853 PMD_INIT_FUNC_TRACE();
3855 /* Function is the reverse of dpaa2_sec_dev_init.
3856 * It does the following:
3857 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3858 * 2. Close the DPSECI device
3859 * 3. Free the allocated resources.
3862 /* Close the device at the underlying layer */
3863 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3865 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3869 /* Free the allocated memory for the device private data and dpseci */
3872 rte_free(dev->security_ctx);
3873 rte_mempool_free(priv->fle_pool);
3875 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3876 dev->data->name, rte_socket_id());
3882 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3884 struct dpaa2_sec_dev_private *internals;
3885 struct rte_device *dev = cryptodev->device;
3886 struct rte_dpaa2_device *dpaa2_dev;
3887 #ifdef RTE_LIB_SECURITY
3888 struct rte_security_ctx *security_instance;
3890 struct fsl_mc_io *dpseci;
3892 struct dpseci_attr attr;
3896 PMD_INIT_FUNC_TRACE();
3897 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3898 hw_id = dpaa2_dev->object_id;
3900 cryptodev->driver_id = cryptodev_driver_id;
3901 cryptodev->dev_ops = &crypto_ops;
3903 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3904 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3905 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3906 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3907 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3908 RTE_CRYPTODEV_FF_SECURITY |
3909 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3910 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3911 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3912 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3913 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3915 internals = cryptodev->data->dev_private;
3918 * For secondary processes, we don't initialise any further as primary
3919 * has already done this work. Only check we don't need a different
3922 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3923 DPAA2_SEC_DEBUG("Device already init by primary process");
3926 #ifdef RTE_LIB_SECURITY
3927 /* Initialize security_ctx only for primary process */
3928 security_instance = rte_malloc("rte_security_instances_ops",
3929 sizeof(struct rte_security_ctx), 0);
3930 if (security_instance == NULL)
3932 security_instance->device = (void *)cryptodev;
3933 security_instance->ops = &dpaa2_sec_security_ops;
3934 security_instance->sess_cnt = 0;
3935 cryptodev->security_ctx = security_instance;
3937 /* Open the rte device via MC and save the handle for further use */
3938 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3939 sizeof(struct fsl_mc_io), 0);
3942 "Error in allocating the memory for dpsec object");
3945 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3947 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3949 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3953 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3956 "Cannot get dpsec device attributed: Error = %x",
3960 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3963 internals->max_nb_queue_pairs = attr.num_tx_queues;
3964 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3965 internals->hw = dpseci;
3966 internals->token = token;
3968 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3969 getpid(), cryptodev->data->dev_id);
3970 internals->fle_pool = rte_mempool_create((const char *)str,
3973 FLE_POOL_CACHE_SIZE, 0,
3974 NULL, NULL, NULL, NULL,
3976 if (!internals->fle_pool) {
3977 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3981 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3985 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3987 /* dpaa2_sec_uninit(crypto_dev_name); */
3992 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3993 struct rte_dpaa2_device *dpaa2_dev)
3995 struct rte_cryptodev *cryptodev;
3996 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4000 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4001 dpaa2_dev->object_id);
4003 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4004 if (cryptodev == NULL)
4007 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4008 cryptodev->data->dev_private = rte_zmalloc_socket(
4009 "cryptodev private structure",
4010 sizeof(struct dpaa2_sec_dev_private),
4011 RTE_CACHE_LINE_SIZE,
4014 if (cryptodev->data->dev_private == NULL)
4015 rte_panic("Cannot allocate memzone for private "
4019 dpaa2_dev->cryptodev = cryptodev;
4020 cryptodev->device = &dpaa2_dev->device;
4022 /* init user callbacks */
4023 TAILQ_INIT(&(cryptodev->link_intr_cbs));
4025 if (dpaa2_svr_family == SVR_LX2160A)
4026 rta_set_sec_era(RTA_SEC_ERA_10);
4028 rta_set_sec_era(RTA_SEC_ERA_8);
4030 DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
4032 /* Invoke PMD device initialization function */
4033 retval = dpaa2_sec_dev_init(cryptodev);
4037 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4038 rte_free(cryptodev->data->dev_private);
4040 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4046 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4048 struct rte_cryptodev *cryptodev;
4051 cryptodev = dpaa2_dev->cryptodev;
4052 if (cryptodev == NULL)
4055 ret = dpaa2_sec_uninit(cryptodev);
4059 return rte_cryptodev_pmd_destroy(cryptodev);
4062 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4063 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4064 .drv_type = DPAA2_CRYPTO,
4066 .name = "DPAA2 SEC PMD"
4068 .probe = cryptodev_dpaa2_sec_probe,
4069 .remove = cryptodev_dpaa2_sec_remove,
4072 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4074 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4075 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4076 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4077 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);