1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 #include <rte_hexdump.h>
33 #include "dpaa2_sec_priv.h"
34 #include "dpaa2_sec_event.h"
35 #include "dpaa2_sec_logs.h"
37 /* RTA header files */
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41 #include <desc/algo.h>
43 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
44 * a pointer to the shared descriptor
46 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
47 #define FSL_VENDOR_ID 0x1957
48 #define FSL_DEVICE_ID 0x410
49 #define FSL_SUBSYSTEM_SEC 1
50 #define FSL_MC_DPSECI_DEVID 3
54 #define DRIVER_DUMP_MODE "drv_dump_mode"
55 #define DRIVER_STRICT_ORDER "drv_strict_order"
57 /* DPAA2_SEC_DP_DUMP levels */
58 enum dpaa2_sec_dump_levels {
	/* Dump frame-descriptor details only on error completions */
60 	DPAA2_SEC_DP_ERR_DUMP,
	/* Dump frame-descriptor details for all packets */
61 	DPAA2_SEC_DP_FULL_DUMP
/* Driver id assigned to this PMD by the cryptodev library */
64 uint8_t cryptodev_driver_id;
/* Selected dump verbosity; defaults to error-only dumps */
65 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
67 #ifdef RTE_LIB_SECURITY
/*
 * Build a compound frame descriptor (frame list) for a lookaside-protocol
 * operation whose input/output mbufs are scatter/gather (multi-segment).
 * The output FLE chain is built from @mbuf segments and the input FLE
 * chain from sym_op->m_src segments; the FD length is set from the input
 * FLE length. NOTE(review): this listing elides some source lines;
 * comments below cover only the visible code.
 */
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70 struct rte_crypto_op *op,
71 struct qbman_fd *fd, uint16_t bpid)
73 struct rte_crypto_sym_op *sym_op = op->sym;
74 struct ctxt_priv *priv = sess->ctxt;
75 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76 struct sec_flow_context *flc;
77 struct rte_mbuf *mbuf;
78 uint32_t in_len = 0, out_len = 0;
85 /* first FLE entry used to store mbuf and session ctxt */
86 fle = (struct qbman_fle *)rte_malloc(NULL,
87 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
90 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
93 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
/* Stash the crypto op and session ctxt in the first FLE so they can be
 * recovered when the response is dequeued.
 */
94 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
97 /* Save the shared descriptor */
98 flc = &priv->flc_desc[0].flc;
/* Valid buffer-pool id: tag FD/FLEs with it; otherwise mark them IVP
 * (invalid pool) so the hardware does not try to free the buffers.
 */
104 if (likely(bpid < MAX_BPID)) {
105 DPAA2_SET_FD_BPID(fd, bpid);
106 DPAA2_SET_FLE_BPID(op_fle, bpid);
107 DPAA2_SET_FLE_BPID(ip_fle, bpid);
109 DPAA2_SET_FD_IVP(fd);
110 DPAA2_SET_FLE_IVP(op_fle);
111 DPAA2_SET_FLE_IVP(ip_fle);
114 /* Configure FD as a FRAME LIST */
115 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116 DPAA2_SET_FD_COMPOUND_FMT(fd);
117 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
119 /* Configure Output FLE with Scatter/Gather Entry */
120 DPAA2_SET_FLE_SG_EXT(op_fle);
121 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
123 /* Configure Output SGE for Encap/Decap */
124 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
128 sge->length = mbuf->data_len;
129 out_len += sge->length;
132 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
135 /* using buf_len for last buf - so that extra data can be added */
136 sge->length = mbuf->buf_len - mbuf->data_off;
137 out_len += sge->length;
139 DPAA2_SET_FLE_FIN(sge);
140 op_fle->length = out_len;
143 mbuf = sym_op->m_src;
145 /* Configure Input FLE with Scatter/Gather Entry */
146 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147 DPAA2_SET_FLE_SG_EXT(ip_fle);
148 DPAA2_SET_FLE_FIN(ip_fle);
150 /* Configure input SGE for Encap/Decap */
151 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153 sge->length = mbuf->data_len;
154 in_len += sge->length;
160 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162 sge->length = mbuf->data_len;
163 in_len += sge->length;
166 ip_fle->length = in_len;
167 DPAA2_SET_FLE_FIN(sge);
169 /* In case of PDCP, per packet HFN is stored in
170 * mbuf priv after sym_op.
172 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
174 sess->pdcp.hfn_ovd_offset);
175 /* enable HFN override */
176 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
177 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
178 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
180 DPAA2_SET_FD_LEN(fd, ip_fle->length);
/*
 * Build a compound frame descriptor for a lookaside-protocol operation
 * on single-segment mbufs: one plain output FLE pointing at dst mbuf
 * data and one input FLE pointing at src mbuf data. The FLE pair comes
 * from the session's fle_pool. NOTE(review): this listing elides some
 * source lines; comments below cover only the visible code.
 */
186 build_proto_compound_fd(dpaa2_sec_session *sess,
187 struct rte_crypto_op *op,
188 struct qbman_fd *fd, uint16_t bpid)
190 struct rte_crypto_sym_op *sym_op = op->sym;
191 struct ctxt_priv *priv = sess->ctxt;
192 struct qbman_fle *fle, *ip_fle, *op_fle;
193 struct sec_flow_context *flc;
194 struct rte_mbuf *src_mbuf = sym_op->m_src;
195 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
201 /* Save the shared descriptor */
202 flc = &priv->flc_desc[0].flc;
204 /* we are using the first FLE entry to store Mbuf */
205 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
207 DPAA2_SEC_DP_ERR("Memory alloc failed");
210 memset(fle, 0, FLE_POOL_BUF_SIZE);
/* First FLE stashes the crypto op and session ctxt for dequeue time */
211 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
212 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Valid pool id: tag FD/FLEs; otherwise mark invalid-pool (IVP) */
217 if (likely(bpid < MAX_BPID)) {
218 DPAA2_SET_FD_BPID(fd, bpid);
219 DPAA2_SET_FLE_BPID(op_fle, bpid);
220 DPAA2_SET_FLE_BPID(ip_fle, bpid);
222 DPAA2_SET_FD_IVP(fd);
223 DPAA2_SET_FLE_IVP(op_fle);
224 DPAA2_SET_FLE_IVP(ip_fle);
227 /* Configure FD as a FRAME LIST */
228 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
229 DPAA2_SET_FD_COMPOUND_FMT(fd);
230 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
232 /* Configure Output FLE with dst mbuf data */
233 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
234 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
/* Output length is the full buf_len so protocol overhead can be added */
235 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
237 /* Configure Input FLE with src mbuf data */
238 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
239 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
240 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
242 DPAA2_SET_FD_LEN(fd, ip_fle->length);
243 DPAA2_SET_FLE_FIN(ip_fle);
245 /* In case of PDCP, per packet HFN is stored in
246 * mbuf priv after sym_op.
248 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
249 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
250 sess->pdcp.hfn_ovd_offset);
251 /* enable HFN override */
252 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
253 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
254 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
/*
 * Build a simple (non-compound) frame descriptor for a lookaside-protocol
 * operation: the FD points directly at the source mbuf data. The visible
 * early return delegates to build_proto_compound_fd() — presumably under
 * an elided condition; TODO confirm against the full source.
 */
262 build_proto_fd(dpaa2_sec_session *sess,
263 struct rte_crypto_op *op,
264 struct qbman_fd *fd, uint16_t bpid)
266 struct rte_crypto_sym_op *sym_op = op->sym;
268 return build_proto_compound_fd(sess, op, fd, bpid);
270 struct ctxt_priv *priv = sess->ctxt;
271 struct sec_flow_context *flc;
272 struct rte_mbuf *mbuf = sym_op->m_src;
274 if (likely(bpid < MAX_BPID))
275 DPAA2_SET_FD_BPID(fd, bpid);
277 DPAA2_SET_FD_IVP(fd);
279 /* Save the shared descriptor */
280 flc = &priv->flc_desc[0].flc;
282 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
283 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
284 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
285 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
287 /* save physical address of mbuf */
/* buf_iova is saved in aead.digest.phys_addr and then repurposed to
 * carry the op pointer; the dequeue path is expected to restore it.
 */
288 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
289 mbuf->buf_iova = (size_t)op;
/*
 * Build a compound FD for an AEAD (GCM) operation with scatter/gather
 * mbufs. Output FLE chain covers the cipher output (plus ICV on encrypt);
 * input FLE chain covers IV + AAD + data (plus the saved ICV on decrypt,
 * so SEC can verify it). NOTE(review): this listing elides some source
 * lines; comments below cover only the visible code.
 */
296 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
297 struct rte_crypto_op *op,
298 struct qbman_fd *fd, __rte_unused uint16_t bpid)
300 struct rte_crypto_sym_op *sym_op = op->sym;
301 struct ctxt_priv *priv = sess->ctxt;
302 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
303 struct sec_flow_context *flc;
304 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
305 int icv_len = sess->digest_length;
307 struct rte_mbuf *mbuf;
308 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* Out-of-place when m_dst is set; otherwise in-place on m_src */
312 mbuf = sym_op->m_dst;
314 mbuf = sym_op->m_src;
316 /* first FLE entry used to store mbuf and session ctxt */
317 fle = (struct qbman_fle *)rte_malloc(NULL,
318 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
319 RTE_CACHE_LINE_SIZE);
320 if (unlikely(!fle)) {
321 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
324 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
325 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
326 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
332 /* Save the shared descriptor */
333 flc = &priv->flc_desc[0].flc;
335 /* Configure FD as a FRAME LIST */
336 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
337 DPAA2_SET_FD_COMPOUND_FMT(fd);
338 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
340 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
341 "iv-len=%d data_off: 0x%x\n",
342 sym_op->aead.data.offset,
343 sym_op->aead.data.length,
346 sym_op->m_src->data_off);
348 /* Configure Output FLE with Scatter/Gather Entry */
349 DPAA2_SET_FLE_SG_EXT(op_fle);
350 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
353 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
/* Encrypt output carries the ICV appended after the ciphertext */
355 op_fle->length = (sess->dir == DIR_ENC) ?
356 (sym_op->aead.data.length + icv_len) :
357 sym_op->aead.data.length;
359 /* Configure Output SGE for Encap/Decap */
360 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
361 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
362 sge->length = mbuf->data_len - sym_op->aead.data.offset;
368 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
369 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
370 sge->length = mbuf->data_len;
373 sge->length -= icv_len;
375 if (sess->dir == DIR_ENC) {
/* Encrypt: SEC writes the computed digest to digest.data */
377 DPAA2_SET_FLE_ADDR(sge,
378 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
379 sge->length = icv_len;
381 DPAA2_SET_FLE_FIN(sge);
384 mbuf = sym_op->m_src;
386 /* Configure Input FLE with Scatter/Gather Entry */
387 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
388 DPAA2_SET_FLE_SG_EXT(ip_fle);
389 DPAA2_SET_FLE_FIN(ip_fle);
390 ip_fle->length = (sess->dir == DIR_ENC) ?
391 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
392 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
395 /* Configure Input SGE for Encap/Decap */
396 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
397 sge->length = sess->iv.length;
401 DPAA2_SET_FLE_ADDR(sge,
402 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
403 sge->length = auth_only_len;
407 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
408 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
410 sge->length = mbuf->data_len - sym_op->aead.data.offset;
416 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
417 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
418 sge->length = mbuf->data_len;
422 if (sess->dir == DIR_DEC) {
/* Decrypt: copy the received ICV so SEC can verify it */
424 old_icv = (uint8_t *)(sge + 1);
425 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
426 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
427 sge->length = icv_len;
430 DPAA2_SET_FLE_FIN(sge);
432 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
433 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
435 DPAA2_SET_FD_LEN(fd, ip_fle->length);
/*
 * Build a compound FD for an AEAD (GCM) operation on single-segment
 * mbufs, using a fixed FLE layout from the session fle_pool (2 FLEs +
 * up to 4 SGEs). NOTE(review): this listing elides some source lines;
 * comments below cover only the visible code.
 */
441 build_authenc_gcm_fd(dpaa2_sec_session *sess,
442 struct rte_crypto_op *op,
443 struct qbman_fd *fd, uint16_t bpid)
445 struct rte_crypto_sym_op *sym_op = op->sym;
446 struct ctxt_priv *priv = sess->ctxt;
447 struct qbman_fle *fle, *sge;
448 struct sec_flow_context *flc;
449 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
450 int icv_len = sess->digest_length, retval;
452 struct rte_mbuf *dst;
453 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
461 /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
462 * Currently we do not know which FLE has the mbuf stored.
463 * So while retrieving we can go back 1 FLE from the FD -ADDR
464 * to get the MBUF Addr from the previous FLE.
465 * We can have a better approach to use the inline Mbuf
467 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
469 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
472 memset(fle, 0, FLE_POOL_BUF_SIZE);
473 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
474 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Valid pool id: tag FD and every FLE/SGE; otherwise mark all IVP */
477 if (likely(bpid < MAX_BPID)) {
478 DPAA2_SET_FD_BPID(fd, bpid);
479 DPAA2_SET_FLE_BPID(fle, bpid);
480 DPAA2_SET_FLE_BPID(fle + 1, bpid);
481 DPAA2_SET_FLE_BPID(sge, bpid);
482 DPAA2_SET_FLE_BPID(sge + 1, bpid);
483 DPAA2_SET_FLE_BPID(sge + 2, bpid);
484 DPAA2_SET_FLE_BPID(sge + 3, bpid);
486 DPAA2_SET_FD_IVP(fd);
487 DPAA2_SET_FLE_IVP(fle);
488 DPAA2_SET_FLE_IVP((fle + 1));
489 DPAA2_SET_FLE_IVP(sge);
490 DPAA2_SET_FLE_IVP((sge + 1));
491 DPAA2_SET_FLE_IVP((sge + 2));
492 DPAA2_SET_FLE_IVP((sge + 3));
495 /* Save the shared descriptor */
496 flc = &priv->flc_desc[0].flc;
497 /* Configure FD as a FRAME LIST */
498 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
499 DPAA2_SET_FD_COMPOUND_FMT(fd);
500 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
502 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
503 "iv-len=%d data_off: 0x%x\n",
504 sym_op->aead.data.offset,
505 sym_op->aead.data.length,
508 sym_op->m_src->data_off);
510 /* Configure Output FLE with Scatter/Gather Entry */
511 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
513 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
/* Encrypt output carries the ICV appended after the ciphertext */
514 fle->length = (sess->dir == DIR_ENC) ?
515 (sym_op->aead.data.length + icv_len) :
516 sym_op->aead.data.length;
518 DPAA2_SET_FLE_SG_EXT(fle);
520 /* Configure Output SGE for Encap/Decap */
521 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
522 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
523 sge->length = sym_op->aead.data.length;
525 if (sess->dir == DIR_ENC) {
527 DPAA2_SET_FLE_ADDR(sge,
528 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
529 sge->length = sess->digest_length;
531 DPAA2_SET_FLE_FIN(sge);
536 /* Configure Input FLE with Scatter/Gather Entry */
537 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
538 DPAA2_SET_FLE_SG_EXT(fle);
539 DPAA2_SET_FLE_FIN(fle);
/* Input = IV + AAD + data (+ saved ICV for decrypt verification) */
540 fle->length = (sess->dir == DIR_ENC) ?
541 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
542 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
543 sess->digest_length);
545 /* Configure Input SGE for Encap/Decap */
546 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
547 sge->length = sess->iv.length;
550 DPAA2_SET_FLE_ADDR(sge,
551 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
552 sge->length = auth_only_len;
553 DPAA2_SET_FLE_BPID(sge, bpid);
557 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
558 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
559 sym_op->m_src->data_off);
560 sge->length = sym_op->aead.data.length;
561 if (sess->dir == DIR_DEC) {
/* Decrypt: copy the received ICV so SEC can verify it */
563 old_icv = (uint8_t *)(sge + 1);
564 memcpy(old_icv, sym_op->aead.digest.data,
565 sess->digest_length);
566 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
567 sge->length = sess->digest_length;
569 DPAA2_SET_FLE_FIN(sge);
572 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
573 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
576 DPAA2_SET_FD_LEN(fd, fle->length);
/*
 * Build a compound FD for a chained cipher+auth operation with
 * scatter/gather mbufs. auth_only_len packs the auth-only header length
 * (low 16 bits) and auth-only trailer length (high 16 bits) for the
 * shared descriptor. NOTE(review): this listing elides some source
 * lines; comments below cover only the visible code.
 */
581 build_authenc_sg_fd(dpaa2_sec_session *sess,
582 struct rte_crypto_op *op,
583 struct qbman_fd *fd, __rte_unused uint16_t bpid)
585 struct rte_crypto_sym_op *sym_op = op->sym;
586 struct ctxt_priv *priv = sess->ctxt;
587 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
588 struct sec_flow_context *flc;
/* Auth-only bytes before the cipher region */
589 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
590 sym_op->auth.data.offset;
/* Auth-only bytes after the cipher region */
591 uint16_t auth_tail_len = sym_op->auth.data.length -
592 sym_op->cipher.data.length - auth_hdr_len;
593 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
594 int icv_len = sess->digest_length;
596 struct rte_mbuf *mbuf;
597 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* Out-of-place when m_dst is set; otherwise in-place on m_src */
601 mbuf = sym_op->m_dst;
603 mbuf = sym_op->m_src;
605 /* first FLE entry used to store mbuf and session ctxt */
606 fle = (struct qbman_fle *)rte_malloc(NULL,
607 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
608 RTE_CACHE_LINE_SIZE);
609 if (unlikely(!fle)) {
610 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
613 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
614 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
615 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
621 /* Save the shared descriptor */
622 flc = &priv->flc_desc[0].flc;
624 /* Configure FD as a FRAME LIST */
625 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
626 DPAA2_SET_FD_COMPOUND_FMT(fd);
627 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
630 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
631 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
632 sym_op->auth.data.offset,
633 sym_op->auth.data.length,
635 sym_op->cipher.data.offset,
636 sym_op->cipher.data.length,
638 sym_op->m_src->data_off);
640 /* Configure Output FLE with Scatter/Gather Entry */
641 DPAA2_SET_FLE_SG_EXT(op_fle);
642 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
645 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
/* Encrypt output carries the ICV appended after the ciphertext */
647 op_fle->length = (sess->dir == DIR_ENC) ?
648 (sym_op->cipher.data.length + icv_len) :
649 sym_op->cipher.data.length;
651 /* Configure Output SGE for Encap/Decap */
652 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
653 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
654 sge->length = mbuf->data_len - sym_op->auth.data.offset;
660 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
661 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
662 sge->length = mbuf->data_len;
665 sge->length -= icv_len;
667 if (sess->dir == DIR_ENC) {
/* Encrypt: SEC writes the computed digest to digest.data */
669 DPAA2_SET_FLE_ADDR(sge,
670 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
671 sge->length = icv_len;
673 DPAA2_SET_FLE_FIN(sge);
676 mbuf = sym_op->m_src;
678 /* Configure Input FLE with Scatter/Gather Entry */
679 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
680 DPAA2_SET_FLE_SG_EXT(ip_fle);
681 DPAA2_SET_FLE_FIN(ip_fle);
682 ip_fle->length = (sess->dir == DIR_ENC) ?
683 (sym_op->auth.data.length + sess->iv.length) :
684 (sym_op->auth.data.length + sess->iv.length +
687 /* Configure Input SGE for Encap/Decap */
688 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
689 sge->length = sess->iv.length;
692 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
693 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
695 sge->length = mbuf->data_len - sym_op->auth.data.offset;
701 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
702 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
703 sge->length = mbuf->data_len;
706 sge->length -= icv_len;
708 if (sess->dir == DIR_DEC) {
/* Decrypt: copy the received ICV so SEC can verify it */
710 old_icv = (uint8_t *)(sge + 1);
711 memcpy(old_icv, sym_op->auth.digest.data,
713 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
714 sge->length = icv_len;
717 DPAA2_SET_FLE_FIN(sge);
719 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
720 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
722 DPAA2_SET_FD_LEN(fd, ip_fle->length);
/*
 * Build a compound FD for a chained cipher+auth operation on
 * single-segment mbufs, using a fixed FLE layout from the session
 * fle_pool (2 FLEs + up to 4 SGEs). auth_only_len packs the auth-only
 * header length (low 16 bits) and trailer length (high 16 bits).
 * NOTE(review): this listing elides some source lines; comments below
 * cover only the visible code.
 */
728 build_authenc_fd(dpaa2_sec_session *sess,
729 struct rte_crypto_op *op,
730 struct qbman_fd *fd, uint16_t bpid)
732 struct rte_crypto_sym_op *sym_op = op->sym;
733 struct ctxt_priv *priv = sess->ctxt;
734 struct qbman_fle *fle, *sge;
735 struct sec_flow_context *flc;
736 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
737 sym_op->auth.data.offset;
738 uint16_t auth_tail_len = sym_op->auth.data.length -
739 sym_op->cipher.data.length - auth_hdr_len;
740 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
742 int icv_len = sess->digest_length, retval;
744 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
746 struct rte_mbuf *dst;
753 /* we are using the first FLE entry to store Mbuf.
754 * Currently we do not know which FLE has the mbuf stored.
755 * So while retrieving we can go back 1 FLE from the FD -ADDR
756 * to get the MBUF Addr from the previous FLE.
757 * We can have a better approach to use the inline Mbuf
759 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
761 DPAA2_SEC_ERR("Memory alloc failed for SGE");
764 memset(fle, 0, FLE_POOL_BUF_SIZE);
765 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
766 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Valid pool id: tag FD and every FLE/SGE; otherwise mark all IVP */
769 if (likely(bpid < MAX_BPID)) {
770 DPAA2_SET_FD_BPID(fd, bpid);
771 DPAA2_SET_FLE_BPID(fle, bpid);
772 DPAA2_SET_FLE_BPID(fle + 1, bpid);
773 DPAA2_SET_FLE_BPID(sge, bpid);
774 DPAA2_SET_FLE_BPID(sge + 1, bpid);
775 DPAA2_SET_FLE_BPID(sge + 2, bpid);
776 DPAA2_SET_FLE_BPID(sge + 3, bpid);
778 DPAA2_SET_FD_IVP(fd);
779 DPAA2_SET_FLE_IVP(fle);
780 DPAA2_SET_FLE_IVP((fle + 1));
781 DPAA2_SET_FLE_IVP(sge);
782 DPAA2_SET_FLE_IVP((sge + 1));
783 DPAA2_SET_FLE_IVP((sge + 2));
784 DPAA2_SET_FLE_IVP((sge + 3));
787 /* Save the shared descriptor */
788 flc = &priv->flc_desc[0].flc;
789 /* Configure FD as a FRAME LIST */
790 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
791 DPAA2_SET_FD_COMPOUND_FMT(fd);
792 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
795 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
796 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
797 sym_op->auth.data.offset,
798 sym_op->auth.data.length,
800 sym_op->cipher.data.offset,
801 sym_op->cipher.data.length,
803 sym_op->m_src->data_off);
805 /* Configure Output FLE with Scatter/Gather Entry */
806 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
808 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
/* Encrypt output carries the ICV appended after the ciphertext */
809 fle->length = (sess->dir == DIR_ENC) ?
810 (sym_op->cipher.data.length + icv_len) :
811 sym_op->cipher.data.length;
813 DPAA2_SET_FLE_SG_EXT(fle);
815 /* Configure Output SGE for Encap/Decap */
816 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
817 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
819 sge->length = sym_op->cipher.data.length;
821 if (sess->dir == DIR_ENC) {
823 DPAA2_SET_FLE_ADDR(sge,
824 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
825 sge->length = sess->digest_length;
826 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
829 DPAA2_SET_FLE_FIN(sge);
834 /* Configure Input FLE with Scatter/Gather Entry */
835 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
836 DPAA2_SET_FLE_SG_EXT(fle);
837 DPAA2_SET_FLE_FIN(fle);
838 fle->length = (sess->dir == DIR_ENC) ?
839 (sym_op->auth.data.length + sess->iv.length) :
840 (sym_op->auth.data.length + sess->iv.length +
841 sess->digest_length);
843 /* Configure Input SGE for Encap/Decap */
844 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
845 sge->length = sess->iv.length;
848 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
849 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
850 sym_op->m_src->data_off);
851 sge->length = sym_op->auth.data.length;
852 if (sess->dir == DIR_DEC) {
/* Decrypt: copy the received ICV so SEC can verify it */
854 old_icv = (uint8_t *)(sge + 1);
855 memcpy(old_icv, sym_op->auth.digest.data,
856 sess->digest_length);
857 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
858 sge->length = sess->digest_length;
859 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
860 sess->digest_length +
863 DPAA2_SET_FLE_FIN(sge);
865 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
866 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
/*
 * Build a compound FD for an auth-only operation with scatter/gather
 * input. Output FLE points at the digest buffer; input FLE chains the
 * optional IV, the data segments, and (on verify) the received digest.
 * For SNOW3G UIA2 / ZUC EIA3 the auth length/offset are given in bits
 * and must be byte-aligned; they are converted to bytes here.
 * NOTE(review): this listing elides some source lines; comments below
 * cover only the visible code.
 */
871 static inline int build_auth_sg_fd(
872 dpaa2_sec_session *sess,
873 struct rte_crypto_op *op,
875 __rte_unused uint16_t bpid)
877 struct rte_crypto_sym_op *sym_op = op->sym;
878 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
879 struct sec_flow_context *flc;
880 struct ctxt_priv *priv = sess->ctxt;
881 int data_len, data_offset;
883 struct rte_mbuf *mbuf;
885 data_len = sym_op->auth.data.length;
886 data_offset = sym_op->auth.data.offset;
888 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
889 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
/* Bit-length APIs: reject values not on a byte boundary */
890 if ((data_len & 7) || (data_offset & 7)) {
891 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
/* Convert bit counts to byte counts */
895 data_len = data_len >> 3;
896 data_offset = data_offset >> 3;
899 mbuf = sym_op->m_src;
900 fle = (struct qbman_fle *)rte_malloc(NULL,
901 FLE_SG_MEM_SIZE(mbuf->nb_segs),
902 RTE_CACHE_LINE_SIZE);
903 if (unlikely(!fle)) {
904 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
907 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
908 /* first FLE entry used to store mbuf and session ctxt */
909 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
910 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
915 flc = &priv->flc_desc[DESC_INITFINAL].flc;
917 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
918 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
919 DPAA2_SET_FD_COMPOUND_FMT(fd);
/* Output FLE: digest destination buffer */
922 DPAA2_SET_FLE_ADDR(op_fle,
923 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
924 op_fle->length = sess->digest_length;
927 DPAA2_SET_FLE_SG_EXT(ip_fle);
928 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
929 ip_fle->length = data_len;
931 if (sess->iv.length) {
934 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* SNOW f9 / ZUC EIA IVs need reformatting before SEC consumes them */
937 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
938 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
940 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
941 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
944 sge->length = sess->iv.length;
946 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
947 ip_fle->length += sge->length;
951 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
952 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
954 if (data_len <= (mbuf->data_len - data_offset)) {
955 sge->length = data_len;
958 sge->length = mbuf->data_len - data_offset;
960 /* remaining i/p segs */
961 while ((data_len = data_len - sge->length) &&
962 (mbuf = mbuf->next)) {
964 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
965 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
966 if (data_len > mbuf->data_len)
967 sge->length = mbuf->data_len;
969 sge->length = data_len;
973 if (sess->dir == DIR_DEC) {
974 /* Digest verification case */
976 old_digest = (uint8_t *)(sge + 1);
977 rte_memcpy(old_digest, sym_op->auth.digest.data,
978 sess->digest_length);
979 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
980 sge->length = sess->digest_length;
981 ip_fle->length += sess->digest_length;
983 DPAA2_SET_FLE_FIN(sge);
984 DPAA2_SET_FLE_FIN(ip_fle);
985 DPAA2_SET_FD_LEN(fd, ip_fle->length);
/*
 * Build a compound FD for an auth-only operation on a single-segment
 * mbuf, using an FLE pair from the session fle_pool. For SNOW3G UIA2 /
 * ZUC EIA3 the auth length/offset are in bits and must be byte-aligned;
 * they are converted to bytes here. NOTE(review): this listing elides
 * some source lines; comments below cover only the visible code.
 */
991 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
992 struct qbman_fd *fd, uint16_t bpid)
994 struct rte_crypto_sym_op *sym_op = op->sym;
995 struct qbman_fle *fle, *sge;
996 struct sec_flow_context *flc;
997 struct ctxt_priv *priv = sess->ctxt;
998 int data_len, data_offset;
1002 data_len = sym_op->auth.data.length;
1003 data_offset = sym_op->auth.data.offset;
1005 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1006 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1007 if ((data_len & 7) || (data_offset & 7)) {
1008 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
/* Convert bit counts to byte counts */
1012 data_len = data_len >> 3;
1013 data_offset = data_offset >> 3;
1016 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1018 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1021 memset(fle, 0, FLE_POOL_BUF_SIZE);
1022 /* TODO we are using the first FLE entry to store Mbuf.
1023 * Currently we do not know which FLE has the mbuf stored.
1024 * So while retrieving we can go back 1 FLE from the FD -ADDR
1025 * to get the MBUF Addr from the previous FLE.
1026 * We can have a better approach to use the inline Mbuf
1028 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1029 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Valid pool id: tag FD/FLEs/SGEs; otherwise mark all IVP */
1033 if (likely(bpid < MAX_BPID)) {
1034 DPAA2_SET_FD_BPID(fd, bpid);
1035 DPAA2_SET_FLE_BPID(fle, bpid);
1036 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1037 DPAA2_SET_FLE_BPID(sge, bpid);
1038 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1040 DPAA2_SET_FD_IVP(fd);
1041 DPAA2_SET_FLE_IVP(fle);
1042 DPAA2_SET_FLE_IVP((fle + 1));
1043 DPAA2_SET_FLE_IVP(sge);
1044 DPAA2_SET_FLE_IVP((sge + 1));
1047 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1048 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1049 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1050 DPAA2_SET_FD_COMPOUND_FMT(fd);
/* Output FLE: digest destination buffer */
1052 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1053 fle->length = sess->digest_length;
1056 /* Setting input FLE */
1057 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1058 DPAA2_SET_FLE_SG_EXT(fle);
1059 fle->length = data_len;
1061 if (sess->iv.length) {
1064 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
/* SNOW f9 / ZUC EIA IVs need reformatting before SEC consumes them */
1067 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1068 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1070 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1071 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1074 sge->length = sess->iv.length;
1077 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1078 fle->length = fle->length + sge->length;
1082 /* Setting data to authenticate */
1083 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1084 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1085 sge->length = data_len;
1087 if (sess->dir == DIR_DEC) {
/* Verify: append a copy of the received digest to the input chain */
1089 old_digest = (uint8_t *)(sge + 1);
1090 rte_memcpy(old_digest, sym_op->auth.digest.data,
1091 sess->digest_length);
1092 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1093 sge->length = sess->digest_length;
1094 fle->length = fle->length + sess->digest_length;
1097 DPAA2_SET_FLE_FIN(sge);
1098 DPAA2_SET_FLE_FIN(fle);
1099 DPAA2_SET_FD_LEN(fd, fle->length);
/*
 * Build a compound FD for a cipher-only operation with scatter/gather
 * mbufs. Input FLE chains IV + source data segments; output FLE chains
 * the destination segments. For SNOW3G UEA2 / ZUC EEA3 the cipher
 * length/offset are in bits and must be byte-aligned; they are converted
 * to bytes here. NOTE(review): this listing elides some source lines;
 * comments below cover only the visible code.
 */
1105 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1106 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1108 struct rte_crypto_sym_op *sym_op = op->sym;
1109 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1110 int data_len, data_offset;
1111 struct sec_flow_context *flc;
1112 struct ctxt_priv *priv = sess->ctxt;
1113 struct rte_mbuf *mbuf;
1114 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1117 data_len = sym_op->cipher.data.length;
1118 data_offset = sym_op->cipher.data.offset;
1120 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1121 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1122 if ((data_len & 7) || (data_offset & 7)) {
1123 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
/* Convert bit counts to byte counts */
1127 data_len = data_len >> 3;
1128 data_offset = data_offset >> 3;
/* Out-of-place when m_dst is set; otherwise in-place on m_src */
1132 mbuf = sym_op->m_dst;
1134 mbuf = sym_op->m_src;
1136 /* first FLE entry used to store mbuf and session ctxt */
1137 fle = (struct qbman_fle *)rte_malloc(NULL,
1138 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1139 RTE_CACHE_LINE_SIZE);
1141 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1144 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1145 /* first FLE entry used to store mbuf and session ctxt */
1146 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1147 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1153 flc = &priv->flc_desc[0].flc;
1156 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1157 " data_off: 0x%x\n",
1161 sym_op->m_src->data_off);
/* Output FLE: scatter/gather over destination segments */
1164 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1165 op_fle->length = data_len;
1166 DPAA2_SET_FLE_SG_EXT(op_fle);
1169 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1170 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1171 sge->length = mbuf->data_len - data_offset;
1177 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1178 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1179 sge->length = mbuf->data_len;
1182 DPAA2_SET_FLE_FIN(sge);
1185 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1186 flc, fle, fle->addr_hi, fle->addr_lo,
1190 mbuf = sym_op->m_src;
/* Input FLE: IV followed by the source data segments */
1192 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1193 ip_fle->length = sess->iv.length + data_len;
1194 DPAA2_SET_FLE_SG_EXT(ip_fle);
1197 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1198 DPAA2_SET_FLE_OFFSET(sge, 0);
1199 sge->length = sess->iv.length;
1204 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1205 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1206 sge->length = mbuf->data_len - data_offset;
1212 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1213 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1214 sge->length = mbuf->data_len;
1217 DPAA2_SET_FLE_FIN(sge);
1218 DPAA2_SET_FLE_FIN(ip_fle);
1221 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1222 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1223 DPAA2_SET_FD_COMPOUND_FMT(fd);
1224 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1227 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1228 " off =%d, len =%d\n",
1229 DPAA2_GET_FD_ADDR(fd),
1230 DPAA2_GET_FD_BPID(fd),
1231 rte_dpaa2_bpid_info[bpid].meta_data_size,
1232 DPAA2_GET_FD_OFFSET(fd),
1233 DPAA2_GET_FD_LEN(fd));
/*
 * build_cipher_fd() - build a QBMAN compound frame descriptor (FD) for a
 * cipher-only operation on a contiguous mbuf.
 *
 * @sess: dpaa2_sec session holding algorithm/IV parameters and the
 *        flow-context descriptor (sess->ctxt).
 * @op:   symmetric crypto operation; source data in op->sym->m_src,
 *        optional out-of-place destination in op->sym->m_dst.
 * @fd:   frame descriptor to fill for enqueue to the SEC hardware.
 * @bpid: buffer-pool id of the source mbuf pool (MAX_BPID means no
 *        hardware pool; IVP/invalid-pool bits are set instead).
 */
1238 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1239 struct qbman_fd *fd, uint16_t bpid)
1241 struct rte_crypto_sym_op *sym_op = op->sym;
1242 struct qbman_fle *fle, *sge;
1243 int retval, data_len, data_offset;
1244 struct sec_flow_context *flc;
1245 struct ctxt_priv *priv = sess->ctxt;
1246 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1248 struct rte_mbuf *dst;
1250 data_len = sym_op->cipher.data.length;
1251 data_offset = sym_op->cipher.data.offset;
/* SNOW3G/ZUC express length and offset in bits: reject values that are
 * not whole bytes, then convert bits -> bytes (>> 3).
 */
1253 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1254 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1255 if ((data_len & 7) || (data_offset & 7)) {
1256 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1260 data_len = data_len >> 3;
1261 data_offset = data_offset >> 3;
/* Out-of-place if m_dst is set, otherwise in-place on m_src. */
1265 dst = sym_op->m_dst;
1267 dst = sym_op->m_src;
/* FLE memory comes from the per-device fle_pool (fixed-size buffers). */
1269 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1271 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1274 memset(fle, 0, FLE_POOL_BUF_SIZE);
1275 /* TODO we are using the first FLE entry to store Mbuf.
1276 * Currently we donot know which FLE has the mbuf stored.
1277 * So while retreiving we can go back 1 FLE from the FD -ADDR
1278 * to get the MBUF Addr from the previous FLE.
1279 * We can have a better approach to use the inline Mbuf
1281 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1282 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
/* Tag FD/FLE/SGE entries with the buffer pool id when valid; otherwise
 * mark them "invalid pool" (IVP) so hardware does not try to free them.
 */
1286 if (likely(bpid < MAX_BPID)) {
1287 DPAA2_SET_FD_BPID(fd, bpid);
1288 DPAA2_SET_FLE_BPID(fle, bpid);
1289 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1290 DPAA2_SET_FLE_BPID(sge, bpid);
1291 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1293 DPAA2_SET_FD_IVP(fd);
1294 DPAA2_SET_FLE_IVP(fle);
1295 DPAA2_SET_FLE_IVP((fle + 1));
1296 DPAA2_SET_FLE_IVP(sge);
1297 DPAA2_SET_FLE_IVP((sge + 1));
/* Point the FD at the FLE pair and at the session flow context (FLC). */
1300 flc = &priv->flc_desc[0].flc;
1301 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1302 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length)
1303 DPAA2_SET_FD_COMPOUND_FMT(fd);
1304 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1307 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1308 " data_off: 0x%x\n",
1312 sym_op->m_src->data_off);
/* Output FLE: ciphertext/plaintext written straight into dst mbuf. */
1314 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst))
1315 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1317 fle->length = data_len + sess->iv.length;
1320 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1321 flc, fle, fle->addr_hi, fle->addr_lo,
/* Input FLE: scatter-gather extension with IV first, then payload. */
1326 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1327 fle->length = data_len + sess->iv.length;
1329 DPAA2_SET_FLE_SG_EXT(fle);
1331 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1332 sge->length = sess->iv.length;
1335 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1336 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1338 sge->length = data_len;
/* FIN bits terminate the SG list and the FLE pair. */
1339 DPAA2_SET_FLE_FIN(sge);
1340 DPAA2_SET_FLE_FIN(fle);
1343 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1344 " off =%d, len =%d\n",
1345 DPAA2_GET_FD_ADDR(fd),
1346 DPAA2_GET_FD_BPID(fd),
1347 rte_dpaa2_bpid_info[bpid].meta_data_size,
1348 DPAA2_GET_FD_OFFSET(fd),
1349 DPAA2_GET_FD_LEN(fd));
/*
 * build_sec_fd() - resolve the session for @op and dispatch to the
 * per-context FD builder.
 *
 * Chooses the scatter-gather (*_sg_fd) builder when either the source
 * mbuf or the (optional) destination mbuf is segmented, otherwise the
 * contiguous builder. Security-offload (IPsec/PDCP) contexts are only
 * handled when RTE_LIB_SECURITY is enabled.
 */
1355 build_sec_fd(struct rte_crypto_op *op,
1356 struct qbman_fd *fd, uint16_t bpid)
1359 dpaa2_sec_session *sess;
/* Look up the session private data by op type: plain symmetric session
 * vs rte_security session.
 */
1361 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1362 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1363 op->sym->session, cryptodev_driver_id);
1364 #ifdef RTE_LIB_SECURITY
1365 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1366 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1367 op->sym->sec_session);
1375 /* Any of the buffer is segmented*/
1376 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1377 ((op->sym->m_dst != NULL) &&
1378 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1379 switch (sess->ctxt_type) {
1380 case DPAA2_SEC_CIPHER:
1381 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1383 case DPAA2_SEC_AUTH:
1384 ret = build_auth_sg_fd(sess, op, fd, bpid);
1386 case DPAA2_SEC_AEAD:
1387 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1389 case DPAA2_SEC_CIPHER_HASH:
1390 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1392 #ifdef RTE_LIB_SECURITY
1393 case DPAA2_SEC_IPSEC:
1394 case DPAA2_SEC_PDCP:
/* IPsec and PDCP share one protocol-offload SG builder. */
1395 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1398 case DPAA2_SEC_HASH_CIPHER:
/* HASH_CIPHER (and unknown types) are not supported. */
1400 DPAA2_SEC_ERR("error: Unsupported session");
/* Contiguous-buffer path: same dispatch, non-SG builders. */
1403 switch (sess->ctxt_type) {
1404 case DPAA2_SEC_CIPHER:
1405 ret = build_cipher_fd(sess, op, fd, bpid);
1407 case DPAA2_SEC_AUTH:
1408 ret = build_auth_fd(sess, op, fd, bpid);
1410 case DPAA2_SEC_AEAD:
1411 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1413 case DPAA2_SEC_CIPHER_HASH:
1414 ret = build_authenc_fd(sess, op, fd, bpid);
1416 #ifdef RTE_LIB_SECURITY
1417 case DPAA2_SEC_IPSEC:
1418 ret = build_proto_fd(sess, op, fd, bpid);
1420 case DPAA2_SEC_PDCP:
1421 ret = build_proto_compound_fd(sess, op, fd, bpid);
1424 case DPAA2_SEC_HASH_CIPHER:
1426 DPAA2_SEC_ERR("error: Unsupported session");
/*
 * dpaa2_sec_enqueue_burst() - rte_cryptodev enqueue callback.
 *
 * Builds one FD per crypto op and pushes them to the SEC TX frame queue
 * through the per-lcore QBMAN software portal, in batches bounded by the
 * EQCR ring size. Returns the number of ops actually enqueued.
 */
1434 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1437 /* Function to transmit the frames to given device and VQ*/
1440 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1441 uint32_t frames_to_send, retry_count;
1442 struct qbman_eq_desc eqdesc;
1443 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1444 struct qbman_swp *swp;
1445 uint16_t num_tx = 0;
1446 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1447 /*todo - need to support multiple buffer pools */
1449 struct rte_mempool *mb_pool;
1451 if (unlikely(nb_ops == 0))
/* Sessionless ops are rejected up front (only ops[0] is checked;
 * the whole burst is assumed to share the same session type).
 */
1454 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1455 DPAA2_SEC_ERR("sessionless crypto op not supported");
1458 /*Prepare enqueue descriptor*/
1459 qbman_eq_desc_clear(&eqdesc);
1460 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1461 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1462 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
/* Lazily affine a QBMAN software portal to this lcore. */
1464 if (!DPAA2_PER_LCORE_DPIO) {
1465 ret = dpaa2_affine_qbman_swp();
1468 "Failed to allocate IO portal, tid: %d\n",
1473 swp = DPAA2_PER_LCORE_PORTAL;
/* Batch size is capped by the enqueue command ring depth. */
1476 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1477 dpaa2_eqcr_size : nb_ops;
1479 for (loop = 0; loop < frames_to_send; loop++) {
/* Event/ordered path: carry the stored sequence number into the
 * enqueue flags and release the held DQRR entry if DCA was used.
 */
1480 if (*dpaa2_seqn((*ops)->sym->m_src)) {
1481 if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
1482 DPAA2_PER_LCORE_DQRR_SIZE--;
/* NOTE(review): '<<' binds tighter than '&', so this clears bit
 * ~((1 << seqn) & IDXMASK) rather than ~(1 << (seqn & IDXMASK)).
 * Confirm the intended masking of the DQRR index.
 */
1483 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
1484 *dpaa2_seqn((*ops)->sym->m_src) &
1485 QBMAN_EQCR_DCA_IDXMASK);
1487 flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
1488 *dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
1491 /*Clear the unused FD fields before sending*/
1492 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1493 mb_pool = (*ops)->sym->m_src->pool;
1494 bpid = mempool_to_bpid(mb_pool);
1495 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1497 DPAA2_SEC_ERR("error: Improper packet contents"
1498 " for crypto operation");
/* Retry enqueue until all frames of this batch are accepted or the
 * retry budget is exhausted.
 */
1506 while (loop < frames_to_send) {
1507 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1510 frames_to_send - loop);
1511 if (unlikely(ret < 0)) {
1513 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1528 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1529 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1533 #ifdef RTE_LIB_SECURITY
/*
 * sec_simple_fd_to_mbuf() - recover the rte_crypto_op from a simple
 * (non-compound) FD returned by SEC for a protocol-offload session.
 *
 * The inline mbuf is reconstructed from the FD buffer address; the op
 * pointer was stashed in mbuf->buf_iova before enqueue and the original
 * buf_iova was parked in aead.digest.phys_addr (restored here).
 */
1534 static inline struct rte_crypto_op *
1535 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1537 struct rte_crypto_op *op;
1538 uint16_t len = DPAA2_GET_FD_LEN(fd);
1540 dpaa2_sec_session *sess_priv __rte_unused;
1542 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1543 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1544 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
/* Adjust mbuf lengths to the (possibly grown/shrunk) SEC output size. */
1546 diff = len - mbuf->pkt_len;
1547 mbuf->pkt_len += diff;
1548 mbuf->data_len += diff;
/* Swap back: buf_iova held the op pointer during the round trip. */
1549 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1550 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1551 op->sym->aead.digest.phys_addr = 0L;
1553 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1554 op->sym->sec_session);
/* Protocol offload shifts data_off by the descriptor header room
 * (outbound adds tunnel headers, inbound strips them).
 */
1555 if (sess_priv->dir == DIR_ENC)
1556 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1558 mbuf->data_off += SEC_FLC_DHR_INBOUND;
/*
 * sec_fd_to_mbuf() - convert a completed FD back into its rte_crypto_op.
 *
 * Simple-format FDs (protocol offload) are delegated to
 * sec_simple_fd_to_mbuf(). For compound FDs the op pointer is read from
 * the FLE stored one entry before the FD address, then the FLE memory is
 * returned to its pool (mempool for contiguous path, rte_free for SG).
 */
1564 static inline struct rte_crypto_op *
1565 sec_fd_to_mbuf(const struct qbman_fd *fd)
1567 struct qbman_fle *fle;
1568 struct rte_crypto_op *op;
1569 struct ctxt_priv *priv;
1570 struct rte_mbuf *dst, *src;
1572 #ifdef RTE_LIB_SECURITY
1573 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1574 return sec_simple_fd_to_mbuf(fd);
1576 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1578 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1579 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1581 /* we are using the first FLE entry to store Mbuf.
1582 * Currently we donot know which FLE has the mbuf stored.
1583 * So while retreiving we can go back 1 FLE from the FD -ADDR
1584 * to get the MBUF Addr from the previous FLE.
1585 * We can have a better approach to use the inline Mbuf
1588 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1589 /* TODO complete it. */
1590 DPAA2_SEC_ERR("error: non inline buffer");
/* The op pointer was stashed in the FLE preceding the FD address. */
1593 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1596 src = op->sym->m_src;
1599 if (op->sym->m_dst) {
1600 dst = op->sym->m_dst;
1605 #ifdef RTE_LIB_SECURITY
/* Security sessions: propagate the SEC output length across the dst
 * segment chain; the last segment absorbs the remainder.
 */
1606 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1607 uint16_t len = DPAA2_GET_FD_LEN(fd);
1609 while (dst->next != NULL) {
1610 len -= dst->data_len;
1613 dst->data_len = len;
1616 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1617 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1620 DPAA2_GET_FD_ADDR(fd),
1621 DPAA2_GET_FD_BPID(fd),
1622 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1623 DPAA2_GET_FD_OFFSET(fd),
1624 DPAA2_GET_FD_LEN(fd));
1626 /* free the fle memory */
1627 if (likely(rte_pktmbuf_is_contiguous(src))) {
1628 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1629 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1631 rte_free((void *)(fle-1));
/*
 * dpaa2_sec_dump() - diagnostic dump of a failed crypto op: session
 * parameters, keys, shared descriptor words, and the source/destination
 * mbufs. Called from the dequeue path when dpaa2_sec_dp_dump requests a
 * full dump on SEC errors.
 *
 * Fix: the security-session lookup and the PDCP parameter dump were
 * guarded by the pre-20.11 macro name RTE_LIBRTE_SECURITY, while the
 * rest of this file uses RTE_LIB_SECURITY (see build_sec_fd and
 * sec_fd_to_mbuf). With the old name those branches were silently
 * compiled out; use the current macro so the guards match the file.
 */
1637 dpaa2_sec_dump(struct rte_crypto_op *op)
1640 dpaa2_sec_session *sess = NULL;
1641 struct ctxt_priv *priv;
1643 struct rte_crypto_sym_op *sym_op;
1645 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1646 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1647 op->sym->session, cryptodev_driver_id);
1648 #ifdef RTE_LIB_SECURITY
1649 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1650 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1651 op->sym->sec_session);
1657 priv = (struct ctxt_priv *)sess->ctxt;
1658 printf("\n****************************************\n"
1659 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
1660 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
1661 "\tCipher key len:\t%zd\n", sess->ctxt_type,
1662 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
1663 sess->cipher_alg, sess->auth_alg, sess->aead_alg,
1664 sess->cipher_key.length);
1665 rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
1666 sess->cipher_key.length);
1667 rte_hexdump(stdout, "auth key", sess->auth_key.data,
1668 sess->auth_key.length);
1669 printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
1670 "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
1671 " len:\t%d\n\taead cipher text:\t%d\n",
1672 sess->auth_key.length, sess->iv.length, sess->iv.offset,
1673 sess->digest_length, sess->status,
1674 sess->ext_params.aead_ctxt.auth_only_len,
1675 sess->ext_params.aead_ctxt.auth_cipher_text);
1676 #ifdef RTE_LIB_SECURITY
1677 printf("PDCP session params:\n"
1678 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
1679 "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
1680 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
1681 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
1682 sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
1683 sess->pdcp.hfn_threshold);
/* word1_sdl = shared-descriptor length in 4-byte words. */
1686 bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
1687 printf("Descriptor Dump:\n");
1688 for (i = 0; i < bufsize; i++)
1689 printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
1694 if (sym_op->m_src) {
1695 printf("Source mbuf:\n");
1696 rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
1698 if (sym_op->m_dst) {
1699 printf("Destination mbuf:\n");
1700 rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
1703 printf("Session address = %p\ncipher offset: %d, length: %d\n"
1704 "auth offset: %d, length: %d\n aead offset: %d, length: %d\n"
1706 sym_op->cipher.data.offset, sym_op->cipher.data.length,
1707 sym_op->auth.data.offset, sym_op->auth.data.length,
1708 sym_op->aead.data.offset, sym_op->aead.data.length);
/*
 * dpaa2_sec_free_eqresp_buf() - drop the frame held in an enqueue-response
 * entry that reported failure (ordered-queue path).
 *
 * @eqresp_ci: consumer index into the per-portal eqresp ring.
 */
1714 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
1716 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1717 struct rte_crypto_op *op;
1718 struct qbman_fd *fd;
1720 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
/* Recover the op from the FD, then free its source mbuf. */
1721 op = sec_fd_to_mbuf(fd);
1722 /* Instead of freeing, enqueue it to the sec tx queue (sec->core)
1723 * after setting an error in FD. But this will have performance impact.
1725 rte_pktmbuf_free(op->sym->m_src);
/*
 * dpaa2_sec_set_enqueue_descriptor() - program the QBMAN enqueue
 * descriptor for ordered/atomic event scheduling, based on the sequence
 * number carried in mbuf @m.
 *
 * ORP-flagged mbufs get order-restoration fields (plus an enqueue
 * response entry when strict ordering is enabled); otherwise the DQRR
 * index is consumed via DCA.
 */
1729 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1731 struct qbman_eq_desc *eqdesc)
1733 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1734 struct eqresp_metadata *eqresp_meta;
1735 struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
1736 uint16_t orpid, seqnum;
1739 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
/* Decode order-restoration point id and sequence number packed in
 * the mbuf sequence field.
 */
1740 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1741 DPAA2_EQCR_OPRID_SHIFT;
1742 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1743 DPAA2_EQCR_SEQNUM_SHIFT;
/* Strict ordering: request an enqueue response and remember which
 * queue/mempool it belongs to via eqresp metadata.
 */
1746 if (!priv->en_loose_ordered) {
1747 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1748 qbman_eq_desc_set_response(eqdesc, (uint64_t)
1749 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1750 dpio_dev->eqresp_pi]), 1);
1751 qbman_eq_desc_set_token(eqdesc, 1);
1753 eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
1754 eqresp_meta->dpaa2_q = dpaa2_q;
1755 eqresp_meta->mp = m->pool;
/* Advance the eqresp producer index with wrap-around. */
1757 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1758 dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
1760 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
/* Atomic path: consume the held DQRR entry via DCA. */
1763 dq_idx = *dpaa2_seqn(m) - 1;
1764 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1765 DPAA2_PER_LCORE_DQRR_SIZE--;
1766 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1768 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
/*
 * dpaa2_sec_enqueue_burst_ordered() - enqueue callback used when the
 * queue pair is bound to an ordered event queue.
 *
 * Unlike dpaa2_sec_enqueue_burst(), a distinct enqueue descriptor is
 * prepared per frame (carrying per-mbuf ordering state), and under
 * strict ordering the batch is capped by the number of free enqueue
 * response descriptors.
 */
1773 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
1776 /* Function to transmit the frames to given device and VQ*/
1779 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1780 uint32_t frames_to_send, num_free_eq_desc, retry_count;
1781 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1782 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1783 struct qbman_swp *swp;
1784 uint16_t num_tx = 0;
1786 struct rte_mempool *mb_pool;
1787 struct dpaa2_sec_dev_private *priv =
1788 dpaa2_qp->tx_vq.crypto_data->dev_private;
1790 if (unlikely(nb_ops == 0))
1793 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1794 DPAA2_SEC_ERR("sessionless crypto op not supported");
1798 if (!DPAA2_PER_LCORE_DPIO) {
1799 ret = dpaa2_affine_qbman_swp();
1801 DPAA2_SEC_ERR("Failure in affining portal");
1805 swp = DPAA2_PER_LCORE_PORTAL;
1808 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1809 dpaa2_eqcr_size : nb_ops;
/* Strict ordering needs one free eqresp descriptor per frame. */
1811 if (!priv->en_loose_ordered) {
1812 if (*dpaa2_seqn((*ops)->sym->m_src)) {
1813 num_free_eq_desc = dpaa2_free_eq_descriptors();
1814 if (num_free_eq_desc < frames_to_send)
1815 frames_to_send = num_free_eq_desc;
1819 for (loop = 0; loop < frames_to_send; loop++) {
1820 /*Prepare enqueue descriptor*/
1821 qbman_eq_desc_clear(&eqdesc[loop]);
1822 qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
/* Mbufs carrying a sequence number get ordering fields; the rest
 * are enqueued without order restoration.
 */
1824 if (*dpaa2_seqn((*ops)->sym->m_src))
1825 dpaa2_sec_set_enqueue_descriptor(
1830 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1831 DPAA2_EQ_RESP_ERR_FQ);
1833 /*Clear the unused FD fields before sending*/
1834 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1835 mb_pool = (*ops)->sym->m_src->pool;
1836 bpid = mempool_to_bpid(mb_pool);
1837 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1839 DPAA2_SEC_ERR("error: Improper packet contents"
1840 " for crypto operation");
/* Retry multi-descriptor enqueue until done or budget exhausted. */
1848 while (loop < frames_to_send) {
1849 ret = qbman_swp_enqueue_multiple_desc(swp,
1850 &eqdesc[loop], &fd_arr[loop],
1851 frames_to_send - loop);
1852 if (unlikely(ret < 0)) {
1854 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1870 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1871 dpaa2_qp->tx_vq.err_pkts += nb_ops;
/*
 * dpaa2_sec_dequeue_burst() - rte_cryptodev dequeue callback.
 *
 * Issues a volatile pull on the RX frame queue, converts each returned
 * FD back to its rte_crypto_op, marks op status from the SEC frame
 * result code, and optionally dumps diagnostics per dpaa2_sec_dp_dump.
 */
1876 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1879 /* Function is responsible to receive frames for a given device and VQ*/
1880 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1881 struct qbman_result *dq_storage;
1882 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1883 int ret, num_rx = 0;
1884 uint8_t is_last = 0, status;
1885 struct qbman_swp *swp;
1886 const struct qbman_fd *fd;
1887 struct qbman_pull_desc pulldesc;
1889 if (!DPAA2_PER_LCORE_DPIO) {
1890 ret = dpaa2_affine_qbman_swp();
1893 "Failed to allocate IO portal, tid: %d\n",
1898 swp = DPAA2_PER_LCORE_PORTAL;
1899 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
/* Build the pull descriptor: frame count capped by DQRR depth,
 * results landing in the queue's dq_storage.
 */
1901 qbman_pull_desc_clear(&pulldesc);
1902 qbman_pull_desc_set_numframes(&pulldesc,
1903 (nb_ops > dpaa2_dqrr_size) ?
1904 dpaa2_dqrr_size : nb_ops);
1905 qbman_pull_desc_set_fq(&pulldesc, fqid);
1906 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1907 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1910 /*Issue a volatile dequeue command. */
1912 if (qbman_swp_pull(swp, &pulldesc)) {
1914 "SEC VDQ command is not issued : QBMAN busy");
1915 /* Portal was busy, try again */
1921 /* Receive the packets till Last Dequeue entry is found with
1922 * respect to the above issues PULL command.
1925 /* Check if the previous issued command is completed.
1926 * Also seems like the SWP is shared between the Ethernet Driver
1927 * and the SEC driver.
1929 while (!qbman_check_command_complete(dq_storage))
1932 /* Loop until the dq_storage is updated with
1933 * new token by QBMAN
1935 while (!qbman_check_new_result(dq_storage))
1937 /* Check whether Last Pull command is Expired and
1938 * setting Condition for Loop termination
1940 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1942 /* Check for valid frame. */
1943 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1945 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1946 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1951 fd = qbman_result_DQ_fd(dq_storage);
1952 ops[num_rx] = sec_fd_to_mbuf(fd);
/* Non-zero frame result code (frc) means SEC reported an error. */
1954 if (unlikely(fd->simple.frc)) {
1955 /* TODO Parse SEC errors */
1956 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
1957 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
1959 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
1960 dpaa2_sec_dump(ops[num_rx]);
1963 dpaa2_qp->rx_vq.err_pkts += 1;
1964 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1966 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1971 } /* End of Packet Rx loop */
1973 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1975 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
1976 dpaa2_qp->rx_vq.err_pkts);
1977 /*Return the total number of packets received to DPAA2 app*/
1981 /** Release queue pair */
/*
 * dpaa2_sec_queue_pair_release() - free the dequeue storage of queue
 * pair @queue_pair_id and clear its slot in dev->data->queue_pairs.
 */
1983 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1985 struct dpaa2_sec_qp *qp =
1986 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1988 PMD_INIT_FUNC_TRACE();
/* Release per-queue dq storage (both the HW storage and its holder). */
1990 if (qp->rx_vq.q_storage) {
1991 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1992 rte_free(qp->rx_vq.q_storage);
1996 dev->data->queue_pairs[queue_pair_id] = NULL;
2001 /** Setup a queue pair */
/*
 * dpaa2_sec_queue_pair_setup() - allocate a queue pair, its dequeue
 * storage, and program the DPSECI RX queue user context to point back at
 * the queue-pair RX virtual queue.
 *
 * @qp_conf and @socket_id are accepted for the cryptodev API but unused.
 */
2003 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2004 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2005 __rte_unused int socket_id)
2007 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2008 struct dpaa2_sec_qp *qp;
2009 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2010 struct dpseci_rx_queue_cfg cfg;
2013 PMD_INIT_FUNC_TRACE();
2015 /* If qp is already in use free ring memory and qp metadata. */
2016 if (dev->data->queue_pairs[qp_id] != NULL) {
2017 DPAA2_SEC_INFO("QP already setup");
2021 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
2022 dev, qp_id, qp_conf);
2024 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2026 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2027 RTE_CACHE_LINE_SIZE);
2029 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
/* Both virtual queues keep a back-pointer to the device data. */
2033 qp->rx_vq.crypto_data = dev->data;
2034 qp->tx_vq.crypto_data = dev->data;
2035 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
2036 sizeof(struct queue_storage_info_t),
2037 RTE_CACHE_LINE_SIZE);
2038 if (!qp->rx_vq.q_storage) {
2039 DPAA2_SEC_ERR("malloc failed for q_storage");
2042 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
2044 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
2045 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
2049 dev->data->queue_pairs[qp_id] = qp;
/* Program the HW RX queue so completions carry &qp->rx_vq as context. */
2051 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
2052 cfg.user_ctx = (size_t)(&qp->rx_vq);
2053 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2058 /** Returns the size of the aesni gcm session structure */
/*
 * dpaa2_sec_sym_session_get_size() - report the driver's symmetric
 * session private-data size (sizeof(dpaa2_sec_session)).
 * (The "aesni gcm" wording in the comment above is a copy-paste
 * leftover; this is the dpaa2_sec session size.)
 */
2060 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2062 PMD_INIT_FUNC_TRACE();
2064 return sizeof(dpaa2_sec_session);
/*
 * dpaa2_sec_cipher_init() - configure a cipher-only session: copy the
 * key, record IV/direction, and build the CAAM shared descriptor into
 * the flow context via the RTA cnstr_shdsc_* helpers.
 *
 * Returns 0 on success, negative errno-style value on failure (key
 * allocation, unsupported algorithm, or descriptor build error).
 */
2068 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
2069 struct rte_crypto_sym_xform *xform,
2070 dpaa2_sec_session *session)
2072 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2073 struct alginfo cipherdata;
2074 int bufsize, ret = 0;
2075 struct ctxt_priv *priv;
2076 struct sec_flow_context *flc;
2078 PMD_INIT_FUNC_TRACE();
2080 /* For SEC CIPHER only one descriptor is required. */
2081 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2082 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2083 RTE_CACHE_LINE_SIZE);
2085 DPAA2_SEC_ERR("No Memory for priv CTXT");
2089 priv->fle_pool = dev_priv->fle_pool;
2091 flc = &priv->flc_desc[0].flc;
2093 session->ctxt_type = DPAA2_SEC_CIPHER;
/* Keep a private copy of the cipher key; RTA references it inline. */
2094 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2095 RTE_CACHE_LINE_SIZE);
2096 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2097 DPAA2_SEC_ERR("No Memory for cipher key");
2101 session->cipher_key.length = xform->cipher.key.length;
2103 memcpy(session->cipher_key.data, xform->cipher.key.data,
2104 xform->cipher.key.length);
2105 cipherdata.key = (size_t)session->cipher_key.data;
2106 cipherdata.keylen = session->cipher_key.length;
2107 cipherdata.key_enc_flags = 0;
2108 cipherdata.key_type = RTA_DATA_IMM;
2110 /* Set IV parameters */
2111 session->iv.offset = xform->cipher.iv.offset;
2112 session->iv.length = xform->cipher.iv.length;
2113 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/* Map the rte_crypto algorithm onto CAAM algtype/algmode and build the
 * matching shared descriptor.
 */
2116 switch (xform->cipher.algo) {
2117 case RTE_CRYPTO_CIPHER_AES_CBC:
2118 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2119 cipherdata.algmode = OP_ALG_AAI_CBC;
2120 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2121 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2122 SHR_NEVER, &cipherdata,
2126 case RTE_CRYPTO_CIPHER_3DES_CBC:
2127 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2128 cipherdata.algmode = OP_ALG_AAI_CBC;
2129 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2130 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2131 SHR_NEVER, &cipherdata,
2135 case RTE_CRYPTO_CIPHER_DES_CBC:
2136 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2137 cipherdata.algmode = OP_ALG_AAI_CBC;
2138 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2139 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2140 SHR_NEVER, &cipherdata,
2144 case RTE_CRYPTO_CIPHER_AES_CTR:
2145 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2146 cipherdata.algmode = OP_ALG_AAI_CTR;
2147 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2148 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2149 SHR_NEVER, &cipherdata,
2153 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2154 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2155 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2156 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2160 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2161 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2162 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2163 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2167 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2168 case RTE_CRYPTO_CIPHER_AES_F8:
2169 case RTE_CRYPTO_CIPHER_AES_ECB:
2170 case RTE_CRYPTO_CIPHER_3DES_ECB:
2171 case RTE_CRYPTO_CIPHER_3DES_CTR:
2172 case RTE_CRYPTO_CIPHER_AES_XTS:
2173 case RTE_CRYPTO_CIPHER_ARC4:
2174 case RTE_CRYPTO_CIPHER_NULL:
2175 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2176 xform->cipher.algo);
2180 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2181 xform->cipher.algo);
2187 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
/* word1_sdl records the shared-descriptor length (in words). */
2192 flc->word1_sdl = (uint8_t)bufsize;
2193 session->ctxt = priv;
2195 #ifdef CAAM_DESC_DEBUG
2197 for (i = 0; i < bufsize; i++)
2198 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2203 rte_free(session->cipher_key.data);
/*
 * dpaa2_sec_auth_init() - configure an auth-only session: copy the key,
 * record digest length and direction, and build the CAAM shared
 * descriptor (HMAC, raw hash, SNOW f9, ZUC, or AES-based MAC) via the
 * RTA cnstr_shdsc_* helpers.
 *
 * Returns 0 on success, negative errno-style value on failure (key
 * allocation, unsupported algorithm, or descriptor build error).
 *
 * Fix: the "Unsupported auth alg" error used the broken conversion
 * specifier "%un" (which printf renders as the value followed by a
 * literal 'n'); corrected to "%u" to match the other error messages in
 * this file.
 */
2209 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
2210 struct rte_crypto_sym_xform *xform,
2211 dpaa2_sec_session *session)
2213 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2214 struct alginfo authdata;
2215 int bufsize, ret = 0;
2216 struct ctxt_priv *priv;
2217 struct sec_flow_context *flc;
2219 PMD_INIT_FUNC_TRACE();
2221 /* For SEC AUTH three descriptors are required for various stages */
2222 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2223 sizeof(struct ctxt_priv) + 3 *
2224 sizeof(struct sec_flc_desc),
2225 RTE_CACHE_LINE_SIZE);
2227 DPAA2_SEC_ERR("No Memory for priv CTXT");
2231 priv->fle_pool = dev_priv->fle_pool;
2232 flc = &priv->flc_desc[DESC_INITFINAL].flc;
2234 session->ctxt_type = DPAA2_SEC_AUTH;
/* Keyed algorithms keep a private copy of the auth key; keyless hashes
 * (plain SHA/MD5) have key.length == 0 and skip the copy.
 */
2235 session->auth_key.length = xform->auth.key.length;
2236 if (xform->auth.key.length) {
2237 session->auth_key.data = rte_zmalloc(NULL,
2238 xform->auth.key.length,
2239 RTE_CACHE_LINE_SIZE);
2240 if (session->auth_key.data == NULL) {
2241 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2245 memcpy(session->auth_key.data, xform->auth.key.data,
2246 xform->auth.key.length);
2247 authdata.key = (size_t)session->auth_key.data;
2248 authdata.key_enc_flags = 0;
2249 authdata.key_type = RTA_DATA_IMM;
2251 authdata.keylen = session->auth_key.length;
2253 session->digest_length = xform->auth.digest_length;
2254 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
/* Map the rte_crypto auth algorithm onto CAAM algtype/algmode and build
 * the matching shared descriptor into DESC_INITFINAL.
 */
2257 switch (xform->auth.algo) {
2258 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2259 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2260 authdata.algmode = OP_ALG_AAI_HMAC;
2261 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2262 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2263 1, 0, SHR_NEVER, &authdata,
2265 session->digest_length);
2267 case RTE_CRYPTO_AUTH_MD5_HMAC:
2268 authdata.algtype = OP_ALG_ALGSEL_MD5;
2269 authdata.algmode = OP_ALG_AAI_HMAC;
2270 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2271 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2272 1, 0, SHR_NEVER, &authdata,
2274 session->digest_length);
2276 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2277 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2278 authdata.algmode = OP_ALG_AAI_HMAC;
2279 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2280 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2281 1, 0, SHR_NEVER, &authdata,
2283 session->digest_length);
2285 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2286 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2287 authdata.algmode = OP_ALG_AAI_HMAC;
2288 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2289 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2290 1, 0, SHR_NEVER, &authdata,
2292 session->digest_length);
2294 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2295 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2296 authdata.algmode = OP_ALG_AAI_HMAC;
2297 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2298 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2299 1, 0, SHR_NEVER, &authdata,
2301 session->digest_length);
2303 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2304 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2305 authdata.algmode = OP_ALG_AAI_HMAC;
2306 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2307 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2308 1, 0, SHR_NEVER, &authdata,
2310 session->digest_length);
/* SNOW f9 and ZUC additionally consume an IV from the op. */
2312 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2313 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2314 authdata.algmode = OP_ALG_AAI_F9;
2315 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2316 session->iv.offset = xform->auth.iv.offset;
2317 session->iv.length = xform->auth.iv.length;
2318 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2321 session->digest_length);
2323 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2324 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2325 authdata.algmode = OP_ALG_AAI_F9;
2326 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2327 session->iv.offset = xform->auth.iv.offset;
2328 session->iv.length = xform->auth.iv.length;
2329 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2332 session->digest_length);
2334 case RTE_CRYPTO_AUTH_SHA1:
2335 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2336 authdata.algmode = OP_ALG_AAI_HASH;
2337 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2338 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2339 1, 0, SHR_NEVER, &authdata,
2341 session->digest_length);
2343 case RTE_CRYPTO_AUTH_MD5:
2344 authdata.algtype = OP_ALG_ALGSEL_MD5;
2345 authdata.algmode = OP_ALG_AAI_HASH;
2346 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2347 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2348 1, 0, SHR_NEVER, &authdata,
2350 session->digest_length);
2352 case RTE_CRYPTO_AUTH_SHA256:
2353 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2354 authdata.algmode = OP_ALG_AAI_HASH;
2355 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2356 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2357 1, 0, SHR_NEVER, &authdata,
2359 session->digest_length);
2361 case RTE_CRYPTO_AUTH_SHA384:
2362 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2363 authdata.algmode = OP_ALG_AAI_HASH;
2364 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2365 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2366 1, 0, SHR_NEVER, &authdata,
2368 session->digest_length);
2370 case RTE_CRYPTO_AUTH_SHA512:
2371 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2372 authdata.algmode = OP_ALG_AAI_HASH;
2373 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2374 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2375 1, 0, SHR_NEVER, &authdata,
2377 session->digest_length);
2379 case RTE_CRYPTO_AUTH_SHA224:
2380 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2381 authdata.algmode = OP_ALG_AAI_HASH;
2382 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2383 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2384 1, 0, SHR_NEVER, &authdata,
2386 session->digest_length);
2388 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2389 authdata.algtype = OP_ALG_ALGSEL_AES;
2390 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2391 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2392 bufsize = cnstr_shdsc_aes_mac(
2393 priv->flc_desc[DESC_INITFINAL].desc,
2394 1, 0, SHR_NEVER, &authdata,
2396 session->digest_length);
2398 case RTE_CRYPTO_AUTH_AES_CMAC:
2399 authdata.algtype = OP_ALG_ALGSEL_AES;
2400 authdata.algmode = OP_ALG_AAI_CMAC;
2401 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2402 bufsize = cnstr_shdsc_aes_mac(
2403 priv->flc_desc[DESC_INITFINAL].desc,
2404 1, 0, SHR_NEVER, &authdata,
2406 session->digest_length);
2408 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2409 case RTE_CRYPTO_AUTH_AES_GMAC:
2410 case RTE_CRYPTO_AUTH_KASUMI_F9:
2411 case RTE_CRYPTO_AUTH_NULL:
2412 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2417 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2424 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
/* word1_sdl records the shared-descriptor length (in words). */
2429 flc->word1_sdl = (uint8_t)bufsize;
2430 session->ctxt = priv;
2431 #ifdef CAAM_DESC_DEBUG
2433 for (i = 0; i < bufsize; i++)
2434 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2435 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2441 rte_free(session->auth_key.data);
/*
 * dpaa2_sec_aead_init() - configure a plain (non-protocol) AEAD session.
 * Only AES-GCM is accepted below; AES-CCM and anything else is rejected.
 *
 * Visible flow: record IV offset/length, allocate a single flow-context
 * descriptor (ctxt_priv), copy the AEAD key into session-owned memory,
 * fill an RTA alginfo, query whether the key can be inlined in the shared
 * descriptor, build the GCM encap or decap shared descriptor, and store
 * its length in flc->word1_sdl.
 *
 * NOTE(review): this chunk was extracted with gaps (embedded original line
 * numbers skip, e.g. 2453 -> 2455); braces, break statements and the
 * error/return paths are not fully visible here.
 */
2447 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2448 struct rte_crypto_sym_xform *xform,
2449 dpaa2_sec_session *session)
2451 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2452 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2453 struct alginfo aeaddata;
2455 struct ctxt_priv *priv;
2456 struct sec_flow_context *flc;
2457 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2460 PMD_INIT_FUNC_TRACE();
2462 /* Set IV parameters */
2463 session->iv.offset = aead_xform->iv.offset;
2464 session->iv.length = aead_xform->iv.length;
2465 session->ctxt_type = DPAA2_SEC_AEAD;
2467 /* For SEC AEAD only one descriptor is required */
2468 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2469 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2470 RTE_CACHE_LINE_SIZE);
2472 DPAA2_SEC_ERR("No Memory for priv CTXT");
2476 priv->fle_pool = dev_priv->fle_pool;
2477 flc = &priv->flc_desc[0].flc;
/* Copy the AEAD key into cache-aligned, session-owned storage. */
2479 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2480 RTE_CACHE_LINE_SIZE);
2481 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2482 DPAA2_SEC_ERR("No Memory for aead key");
2486 memcpy(session->aead_key.data, aead_xform->key.data,
2487 aead_xform->key.length);
2489 session->digest_length = aead_xform->digest_length;
2490 session->aead_key.length = aead_xform->key.length;
2491 ctxt->auth_only_len = aead_xform->aad_length;
/* alginfo consumed by the RTA shared-descriptor constructor below. */
2493 aeaddata.key = (size_t)session->aead_key.data;
2494 aeaddata.keylen = session->aead_key.length;
2495 aeaddata.key_enc_flags = 0;
2496 aeaddata.key_type = RTA_DATA_IMM;
2498 switch (aead_xform->algo) {
2499 case RTE_CRYPTO_AEAD_AES_GCM:
2500 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2501 aeaddata.algmode = OP_ALG_AAI_GCM;
2502 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
/* CCM (and any undefined algo) is unsupported on this non-proto path. */
2504 case RTE_CRYPTO_AEAD_AES_CCM:
2505 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2510 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2515 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
/*
 * Ask RTA whether the key fits inline in the shared descriptor;
 * bit 0 of desc[1] reports the verdict for the single key queried.
 */
2518 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2519 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2521 (unsigned int *)priv->flc_desc[0].desc,
2522 &priv->flc_desc[0].desc[1], 1);
2525 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
/* Inline the key immediately, or reference it by IOVA pointer. */
2529 if (priv->flc_desc[0].desc[1] & 1) {
2530 aeaddata.key_type = RTA_DATA_IMM;
2532 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2533 aeaddata.key_type = RTA_DATA_PTR;
/* Scratch words used by rta_inline_query are cleared before desc build. */
2535 priv->flc_desc[0].desc[0] = 0;
2536 priv->flc_desc[0].desc[1] = 0;
/* Build the GCM shared descriptor matching the session direction. */
2538 if (session->dir == DIR_ENC)
2539 bufsize = cnstr_shdsc_gcm_encap(
2540 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2541 &aeaddata, session->iv.length,
2542 session->digest_length);
2544 bufsize = cnstr_shdsc_gcm_decap(
2545 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2546 &aeaddata, session->iv.length,
2547 session->digest_length);
2549 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
/* Shared-descriptor length (in words) recorded in the flow context. */
2554 flc->word1_sdl = (uint8_t)bufsize;
2555 session->ctxt = priv;
2556 #ifdef CAAM_DESC_DEBUG
2558 for (i = 0; i < bufsize; i++)
2559 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2560 i, priv->flc_desc[0].desc[i]);
/* Error path: release the session's key copy. */
2565 rte_free(session->aead_key.data);
/*
 * dpaa2_sec_aead_chain_init() - configure a chained cipher+auth session
 * (e.g. AES-CBC + HMAC-SHA1), i.e. "AEAD by chaining", not true AEAD.
 *
 * Visible flow: pick cipher/auth xforms depending on chain order
 * (auth_cipher_text selects cipher-first vs auth-first), copy both keys
 * into the session, translate the algorithms to RTA alginfo, query key
 * inlining for both keys, then build an authenc shared descriptor (only
 * the cipher-then-hash direction is supported; hash-before-cipher errors
 * out).
 *
 * NOTE(review): extracted with gaps — break statements, error jumps and
 * several lines between the embedded original line numbers are missing.
 */
2572 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2573 struct rte_crypto_sym_xform *xform,
2574 dpaa2_sec_session *session)
2576 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2577 struct alginfo authdata, cipherdata;
2579 struct ctxt_priv *priv;
2580 struct sec_flow_context *flc;
2581 struct rte_crypto_cipher_xform *cipher_xform;
2582 struct rte_crypto_auth_xform *auth_xform;
2585 PMD_INIT_FUNC_TRACE();
/* Chain order decides which xform in the list is cipher vs auth. */
2587 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2588 cipher_xform = &xform->cipher;
2589 auth_xform = &xform->next->auth;
2590 session->ctxt_type =
2591 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2592 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2594 cipher_xform = &xform->next->cipher;
2595 auth_xform = &xform->auth;
2596 session->ctxt_type =
2597 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2598 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2601 /* Set IV parameters */
2602 session->iv.offset = cipher_xform->iv.offset;
2603 session->iv.length = cipher_xform->iv.length;
2605 /* For SEC AEAD only one descriptor is required */
2606 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2607 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2608 RTE_CACHE_LINE_SIZE);
2610 DPAA2_SEC_ERR("No Memory for priv CTXT");
2614 priv->fle_pool = dev_priv->fle_pool;
2615 flc = &priv->flc_desc[0].flc;
/* Copy both keys into cache-aligned, session-owned buffers. */
2617 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2618 RTE_CACHE_LINE_SIZE);
2619 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2620 DPAA2_SEC_ERR("No Memory for cipher key");
2624 session->cipher_key.length = cipher_xform->key.length;
2625 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2626 RTE_CACHE_LINE_SIZE);
2627 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2628 DPAA2_SEC_ERR("No Memory for auth key");
2629 rte_free(session->cipher_key.data);
2633 session->auth_key.length = auth_xform->key.length;
2634 memcpy(session->cipher_key.data, cipher_xform->key.data,
2635 cipher_xform->key.length);
2636 memcpy(session->auth_key.data, auth_xform->key.data,
2637 auth_xform->key.length);
2639 authdata.key = (size_t)session->auth_key.data;
2640 authdata.keylen = session->auth_key.length;
2641 authdata.key_enc_flags = 0;
2642 authdata.key_type = RTA_DATA_IMM;
2644 session->digest_length = auth_xform->digest_length;
/* Map the auth algorithm to CAAM algsel/AAI mode. */
2646 switch (auth_xform->algo) {
2647 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2648 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2649 authdata.algmode = OP_ALG_AAI_HMAC;
2650 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2652 case RTE_CRYPTO_AUTH_MD5_HMAC:
2653 authdata.algtype = OP_ALG_ALGSEL_MD5;
2654 authdata.algmode = OP_ALG_AAI_HMAC;
2655 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2657 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2658 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2659 authdata.algmode = OP_ALG_AAI_HMAC;
2660 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2662 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2663 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2664 authdata.algmode = OP_ALG_AAI_HMAC;
2665 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2667 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2668 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2669 authdata.algmode = OP_ALG_AAI_HMAC;
2670 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2672 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2673 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2674 authdata.algmode = OP_ALG_AAI_HMAC;
2675 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2677 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2678 authdata.algtype = OP_ALG_ALGSEL_AES;
2679 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2680 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2682 case RTE_CRYPTO_AUTH_AES_CMAC:
2683 authdata.algtype = OP_ALG_ALGSEL_AES;
2684 authdata.algmode = OP_ALG_AAI_CMAC;
2685 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
/* Every remaining auth algo is unsupported in chained mode. */
2687 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2688 case RTE_CRYPTO_AUTH_AES_GMAC:
2689 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2690 case RTE_CRYPTO_AUTH_NULL:
2691 case RTE_CRYPTO_AUTH_SHA1:
2692 case RTE_CRYPTO_AUTH_SHA256:
2693 case RTE_CRYPTO_AUTH_SHA512:
2694 case RTE_CRYPTO_AUTH_SHA224:
2695 case RTE_CRYPTO_AUTH_SHA384:
2696 case RTE_CRYPTO_AUTH_MD5:
2697 case RTE_CRYPTO_AUTH_KASUMI_F9:
2698 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2699 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2704 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2709 cipherdata.key = (size_t)session->cipher_key.data;
2710 cipherdata.keylen = session->cipher_key.length;
2711 cipherdata.key_enc_flags = 0;
2712 cipherdata.key_type = RTA_DATA_IMM;
/* Map the cipher algorithm to CAAM algsel/AAI mode. */
2714 switch (cipher_xform->algo) {
2715 case RTE_CRYPTO_CIPHER_AES_CBC:
2716 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2717 cipherdata.algmode = OP_ALG_AAI_CBC;
2718 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2720 case RTE_CRYPTO_CIPHER_3DES_CBC:
2721 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2722 cipherdata.algmode = OP_ALG_AAI_CBC;
2723 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2725 case RTE_CRYPTO_CIPHER_DES_CBC:
2726 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2727 cipherdata.algmode = OP_ALG_AAI_CBC;
2728 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2730 case RTE_CRYPTO_CIPHER_AES_CTR:
2731 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2732 cipherdata.algmode = OP_ALG_AAI_CTR;
2733 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2735 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2736 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2737 case RTE_CRYPTO_CIPHER_NULL:
2738 case RTE_CRYPTO_CIPHER_3DES_ECB:
2739 case RTE_CRYPTO_CIPHER_3DES_CTR:
2740 case RTE_CRYPTO_CIPHER_AES_ECB:
2741 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2742 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2743 cipher_xform->algo);
2747 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2748 cipher_xform->algo);
2752 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
/*
 * Query inlining for both keys at once: bit 0 of desc[2] answers for
 * the cipher key, bit 1 for the auth key.
 */
2755 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2756 priv->flc_desc[0].desc[1] = authdata.keylen;
2757 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2759 (unsigned int *)priv->flc_desc[0].desc,
2760 &priv->flc_desc[0].desc[2], 2);
2763 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2767 if (priv->flc_desc[0].desc[2] & 1) {
2768 cipherdata.key_type = RTA_DATA_IMM;
2770 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2771 cipherdata.key_type = RTA_DATA_PTR;
2773 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2774 authdata.key_type = RTA_DATA_IMM;
2776 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2777 authdata.key_type = RTA_DATA_PTR;
/* Clear the scratch words used by rta_inline_query. */
2779 priv->flc_desc[0].desc[0] = 0;
2780 priv->flc_desc[0].desc[1] = 0;
2781 priv->flc_desc[0].desc[2] = 0;
/* Only encrypt-then-authenticate descriptors are supported. */
2783 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2784 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2786 &cipherdata, &authdata,
2788 session->digest_length,
2791 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2796 DPAA2_SEC_ERR("Hash before cipher not supported");
2801 flc->word1_sdl = (uint8_t)bufsize;
2802 session->ctxt = priv;
2803 #ifdef CAAM_DESC_DEBUG
2805 for (i = 0; i < bufsize; i++)
2806 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2807 i, priv->flc_desc[0].desc[i]);
/* Error path: free both key copies. */
2813 rte_free(session->cipher_key.data);
2814 rte_free(session->auth_key.data);
/*
 * dpaa2_sec_set_session_parameters() - dispatch a symmetric xform chain to
 * the matching session-init helper.
 *
 * Routing visible below:
 *   - CIPHER only                -> dpaa2_sec_cipher_init()
 *   - AUTH only                  -> dpaa2_sec_auth_init()
 *   - CIPHER->AUTH / AUTH->CIPHER chain -> dpaa2_sec_aead_chain_init(),
 *     unless one leg is NULL, in which case the non-NULL single-алг helper
 *     is used; auth_cipher_text records the chain order for the helper.
 *   - AEAD only                  -> dpaa2_sec_aead_init()
 * Anything else is rejected as an invalid crypto type.
 *
 * NOTE(review): extracted with gaps — returns between branches are not
 * visible here.
 */
2820 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2821 struct rte_crypto_sym_xform *xform, void *sess)
2823 dpaa2_sec_session *session = sess;
2826 PMD_INIT_FUNC_TRACE();
2828 if (unlikely(sess == NULL)) {
2829 DPAA2_SEC_ERR("Invalid session struct");
/* Start from a clean session; helpers fill in only what they use. */
2833 memset(session, 0, sizeof(dpaa2_sec_session));
2834 /* Default IV length = 0 */
2835 session->iv.length = 0;
2838 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2839 ret = dpaa2_sec_cipher_init(dev, xform, session);
2841 /* Authentication Only */
2842 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2843 xform->next == NULL) {
2844 ret = dpaa2_sec_auth_init(dev, xform, session);
2846 /* Cipher then Authenticate */
2847 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2848 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2849 session->ext_params.aead_ctxt.auth_cipher_text = true;
/* A NULL leg degrades the chain to the single-algorithm helper. */
2850 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2851 ret = dpaa2_sec_auth_init(dev, xform, session);
2852 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2853 ret = dpaa2_sec_cipher_init(dev, xform, session);
2855 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2856 /* Authenticate then Cipher */
2857 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2858 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2859 session->ext_params.aead_ctxt.auth_cipher_text = false;
2860 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2861 ret = dpaa2_sec_cipher_init(dev, xform, session);
2862 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2863 ret = dpaa2_sec_auth_init(dev, xform, session);
2865 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2866 /* AEAD operation for AES-GCM kind of Algorithms */
2867 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2868 xform->next == NULL) {
2869 ret = dpaa2_sec_aead_init(dev, xform, session);
2872 DPAA2_SEC_ERR("Invalid crypto type");
2879 #ifdef RTE_LIB_SECURITY
/*
 * dpaa2_sec_ipsec_aead_init() - fill the session and RTA alginfo for an
 * IPsec (lookaside-protocol) AEAD transform.
 *
 * Unlike the non-proto AEAD path, both AES-GCM and AES-CCM are supported
 * here; the IPsec protocol algtype (OP_PCL_IPSEC_AES_{GCM,CCM}{8,12,16})
 * is selected by the digest length, which must be 8, 12 or 16 bytes.
 *
 * NOTE(review): extracted with gaps — the switch case labels for the
 * digest lengths and the break/return lines are not visible here.
 */
2881 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2882 dpaa2_sec_session *session,
2883 struct alginfo *aeaddata)
2885 PMD_INIT_FUNC_TRACE();
/* Copy the AEAD key into cache-aligned, session-owned storage. */
2887 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2888 RTE_CACHE_LINE_SIZE);
2889 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2890 DPAA2_SEC_ERR("No Memory for aead key");
2893 memcpy(session->aead_key.data, aead_xform->key.data,
2894 aead_xform->key.length);
2896 session->digest_length = aead_xform->digest_length;
2897 session->aead_key.length = aead_xform->key.length;
2899 aeaddata->key = (size_t)session->aead_key.data;
2900 aeaddata->keylen = session->aead_key.length;
2901 aeaddata->key_enc_flags = 0;
2902 aeaddata->key_type = RTA_DATA_IMM;
2904 switch (aead_xform->algo) {
2905 case RTE_CRYPTO_AEAD_AES_GCM:
/* ICV length picks the GCM8/GCM12/GCM16 protocol descriptor type. */
2906 switch (session->digest_length) {
2908 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2911 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2914 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2917 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2918 session->digest_length);
2921 aeaddata->algmode = OP_ALG_AAI_GCM;
2922 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2924 case RTE_CRYPTO_AEAD_AES_CCM:
/* Same ICV-length dispatch for the CCM protocol descriptor types. */
2925 switch (session->digest_length) {
2927 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2930 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2933 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2936 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2937 session->digest_length);
2940 aeaddata->algmode = OP_ALG_AAI_CCM;
2941 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2944 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2948 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
/*
 * dpaa2_sec_ipsec_proto_init() - fill the session and the RTA cipher/auth
 * alginfo for an IPsec session built from separate cipher and auth xforms.
 *
 * Either xform may be absent: a missing/NULL leg is recorded as
 * RTE_CRYPTO_CIPHER_NULL / RTE_CRYPTO_AUTH_NULL and mapped to the
 * corresponding OP_PCL_IPSEC_*NULL protocol algtype. Algorithms are
 * translated to IPsec lookaside-protocol descriptor types
 * (OP_PCL_IPSEC_*), not the raw CAAM algsel values used by the
 * non-protocol paths.
 *
 * NOTE(review): extracted with gaps — the if/else guards around the key
 * copies, break statements and return paths are not visible here.
 */
2955 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2956 struct rte_crypto_auth_xform *auth_xform,
2957 dpaa2_sec_session *session,
2958 struct alginfo *cipherdata,
2959 struct alginfo *authdata)
/* Cipher key: copy into session-owned memory when a cipher leg exists. */
2962 session->cipher_key.data = rte_zmalloc(NULL,
2963 cipher_xform->key.length,
2964 RTE_CACHE_LINE_SIZE);
2965 if (session->cipher_key.data == NULL &&
2966 cipher_xform->key.length > 0) {
2967 DPAA2_SEC_ERR("No Memory for cipher key");
2971 session->cipher_key.length = cipher_xform->key.length;
2972 memcpy(session->cipher_key.data, cipher_xform->key.data,
2973 cipher_xform->key.length);
2974 session->cipher_alg = cipher_xform->algo;
/* No cipher leg: record a NULL cipher. */
2976 session->cipher_key.data = NULL;
2977 session->cipher_key.length = 0;
2978 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
/* Auth key: same pattern as the cipher key above. */
2982 session->auth_key.data = rte_zmalloc(NULL,
2983 auth_xform->key.length,
2984 RTE_CACHE_LINE_SIZE);
2985 if (session->auth_key.data == NULL &&
2986 auth_xform->key.length > 0) {
2987 DPAA2_SEC_ERR("No Memory for auth key");
2990 session->auth_key.length = auth_xform->key.length;
2991 memcpy(session->auth_key.data, auth_xform->key.data,
2992 auth_xform->key.length);
2993 session->auth_alg = auth_xform->algo;
2994 session->digest_length = auth_xform->digest_length;
2996 session->auth_key.data = NULL;
2997 session->auth_key.length = 0;
2998 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
3001 authdata->key = (size_t)session->auth_key.data;
3002 authdata->keylen = session->auth_key.length;
3003 authdata->key_enc_flags = 0;
3004 authdata->key_type = RTA_DATA_IMM;
/* Map auth algo to the IPsec protocol descriptor algtype. */
3005 switch (session->auth_alg) {
3006 case RTE_CRYPTO_AUTH_SHA1_HMAC:
3007 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
3008 authdata->algmode = OP_ALG_AAI_HMAC;
3010 case RTE_CRYPTO_AUTH_MD5_HMAC:
3011 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
3012 authdata->algmode = OP_ALG_AAI_HMAC;
3014 case RTE_CRYPTO_AUTH_SHA256_HMAC:
3015 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
3016 authdata->algmode = OP_ALG_AAI_HMAC;
/* RFC 4868 mandates a 128-bit (16-byte) ICV for SHA256-HMAC IPsec. */
3017 if (session->digest_length != 16)
3019 "+++Using sha256-hmac truncated len is non-standard,"
3020 "it will not work with lookaside proto");
3022 case RTE_CRYPTO_AUTH_SHA384_HMAC:
3023 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
3024 authdata->algmode = OP_ALG_AAI_HMAC;
3026 case RTE_CRYPTO_AUTH_SHA512_HMAC:
3027 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3028 authdata->algmode = OP_ALG_AAI_HMAC;
3030 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3031 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3032 authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3034 case RTE_CRYPTO_AUTH_AES_CMAC:
3035 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3036 authdata->algmode = OP_ALG_AAI_CMAC;
3038 case RTE_CRYPTO_AUTH_NULL:
3039 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
/* All remaining auth algos are unsupported for lookaside IPsec. */
3041 case RTE_CRYPTO_AUTH_SHA224_HMAC:
3042 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3043 case RTE_CRYPTO_AUTH_SHA1:
3044 case RTE_CRYPTO_AUTH_SHA256:
3045 case RTE_CRYPTO_AUTH_SHA512:
3046 case RTE_CRYPTO_AUTH_SHA224:
3047 case RTE_CRYPTO_AUTH_SHA384:
3048 case RTE_CRYPTO_AUTH_MD5:
3049 case RTE_CRYPTO_AUTH_AES_GMAC:
3050 case RTE_CRYPTO_AUTH_KASUMI_F9:
3051 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
3052 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3053 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3057 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
3061 cipherdata->key = (size_t)session->cipher_key.data;
3062 cipherdata->keylen = session->cipher_key.length;
3063 cipherdata->key_enc_flags = 0;
3064 cipherdata->key_type = RTA_DATA_IMM;
/* Map cipher algo to the IPsec protocol descriptor algtype. */
3066 switch (session->cipher_alg) {
3067 case RTE_CRYPTO_CIPHER_AES_CBC:
3068 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3069 cipherdata->algmode = OP_ALG_AAI_CBC;
3071 case RTE_CRYPTO_CIPHER_3DES_CBC:
3072 cipherdata->algtype = OP_PCL_IPSEC_3DES;
3073 cipherdata->algmode = OP_ALG_AAI_CBC;
3075 case RTE_CRYPTO_CIPHER_DES_CBC:
3076 cipherdata->algtype = OP_PCL_IPSEC_DES;
3077 cipherdata->algmode = OP_ALG_AAI_CBC;
3079 case RTE_CRYPTO_CIPHER_AES_CTR:
3080 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3081 cipherdata->algmode = OP_ALG_AAI_CTR;
3083 case RTE_CRYPTO_CIPHER_NULL:
3084 cipherdata->algtype = OP_PCL_IPSEC_NULL;
3086 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3087 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3088 case RTE_CRYPTO_CIPHER_3DES_ECB:
3089 case RTE_CRYPTO_CIPHER_3DES_CTR:
3090 case RTE_CRYPTO_CIPHER_AES_ECB:
3091 case RTE_CRYPTO_CIPHER_KASUMI_F8:
3092 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
3093 session->cipher_alg);
3096 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3097 session->cipher_alg);
/*
 * dpaa2_sec_set_ipsec_session() - build a lookaside-protocol IPsec session
 * (rte_security) and its CAAM shared descriptor.
 *
 * Visible flow: allocate the flow-context descriptor, parse the crypto
 * xform chain via dpaa2_sec_ipsec_proto_init()/dpaa2_sec_ipsec_aead_init(),
 * then build either an encap PDB (+ prebuilt outer IPv4/IPv6 tunnel
 * header) for EGRESS or a decap PDB (with optional anti-replay window)
 * for INGRESS, construct the shared descriptor, and point the flow
 * context's RFLC words at queue pair 0's RX queue.
 *
 * NOTE(review): extracted with gaps — several declarations (ret, bufsize,
 * ip4_hdr), gotos, break statements and closing braces between the
 * embedded original line numbers are not visible here.
 */
3105 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3106 struct rte_security_session_conf *conf,
3109 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3110 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3111 struct rte_crypto_auth_xform *auth_xform = NULL;
3112 struct rte_crypto_aead_xform *aead_xform = NULL;
3113 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3114 struct ctxt_priv *priv;
3115 struct alginfo authdata, cipherdata;
3117 struct sec_flow_context *flc;
3118 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3121 PMD_INIT_FUNC_TRACE();
3123 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3124 sizeof(struct ctxt_priv) +
3125 sizeof(struct sec_flc_desc),
3126 RTE_CACHE_LINE_SIZE);
3129 DPAA2_SEC_ERR("No memory for priv CTXT");
3133 priv->fle_pool = dev_priv->fle_pool;
3134 flc = &priv->flc_desc[0].flc;
/* SA byte/packet lifetime limits are not supported by this PMD. */
3136 if (ipsec_xform->life.bytes_hard_limit != 0 ||
3137 ipsec_xform->life.bytes_soft_limit != 0 ||
3138 ipsec_xform->life.packets_hard_limit != 0 ||
3139 ipsec_xform->life.packets_soft_limit != 0)
3142 memset(session, 0, sizeof(dpaa2_sec_session));
/* Resolve the xform chain into cipherdata/authdata alginfo. */
3144 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3145 cipher_xform = &conf->crypto_xform->cipher;
3146 if (conf->crypto_xform->next)
3147 auth_xform = &conf->crypto_xform->next->auth;
3148 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3149 session, &cipherdata, &authdata);
3150 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3151 auth_xform = &conf->crypto_xform->auth;
3152 if (conf->crypto_xform->next)
3153 cipher_xform = &conf->crypto_xform->next->cipher;
3154 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3155 session, &cipherdata, &authdata);
3156 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3157 aead_xform = &conf->crypto_xform->aead;
/* AEAD carries both roles: cipherdata is filled, authdata stays empty. */
3158 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3159 session, &cipherdata);
3160 authdata.keylen = 0;
3161 authdata.algtype = 0;
3163 DPAA2_SEC_ERR("XFORM not specified");
3168 DPAA2_SEC_ERR("Failed to process xform");
3172 session->ctxt_type = DPAA2_SEC_IPSEC;
3173 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3174 uint8_t *hdr = NULL;
3176 struct rte_ipv6_hdr ip6_hdr;
3177 struct ipsec_encap_pdb encap_pdb;
3179 flc->dhr = SEC_FLC_DHR_OUTBOUND;
3180 /* For Sec Proto only one descriptor is required. */
3181 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3183 /* copy algo specific data to PDB */
3184 switch (cipherdata.algtype) {
3185 case OP_PCL_IPSEC_AES_CTR:
/* RFC 3686: counter block starts at 1; nonce comes from the SA salt. */
3186 encap_pdb.ctr.ctr_initial = 0x00000001;
3187 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3189 case OP_PCL_IPSEC_AES_GCM8:
3190 case OP_PCL_IPSEC_AES_GCM12:
3191 case OP_PCL_IPSEC_AES_GCM16:
3192 memcpy(encap_pdb.gcm.salt,
3193 (uint8_t *)&(ipsec_xform->salt), 4);
3197 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3198 PDBOPTS_ESP_OIHI_PDB_INL |
3201 if (ipsec_xform->options.dec_ttl)
3202 encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3203 if (ipsec_xform->options.esn)
3204 encap_pdb.options |= PDBOPTS_ESP_ESN;
3205 encap_pdb.spi = ipsec_xform->spi;
3206 session->dir = DIR_ENC;
/* Prebuild the outer tunnel header; HW inlines it from the PDB. */
3207 if (ipsec_xform->tunnel.type ==
3208 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3209 encap_pdb.ip_hdr_len = sizeof(struct ip);
3210 ip4_hdr.ip_v = IPVERSION;
3212 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
3213 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3216 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3217 ip4_hdr.ip_p = IPPROTO_ESP;
3219 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
3220 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
3221 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
3222 &ip4_hdr, sizeof(struct ip));
3223 hdr = (uint8_t *)&ip4_hdr;
3224 } else if (ipsec_xform->tunnel.type ==
3225 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3226 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3227 DPAA2_IPv6_DEFAULT_VTC_FLOW |
3228 ((ipsec_xform->tunnel.ipv6.dscp <<
3229 RTE_IPV6_HDR_TC_SHIFT) &
3230 RTE_IPV6_HDR_TC_MASK) |
3231 ((ipsec_xform->tunnel.ipv6.flabel <<
3232 RTE_IPV6_HDR_FL_SHIFT) &
3233 RTE_IPV6_HDR_FL_MASK));
3234 /* Payload length will be updated by HW */
3235 ip6_hdr.payload_len = 0;
3236 ip6_hdr.hop_limits =
3237 ipsec_xform->tunnel.ipv6.hlimit;
3238 ip6_hdr.proto = (ipsec_xform->proto ==
3239 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3240 IPPROTO_ESP : IPPROTO_AH;
3241 memcpy(&ip6_hdr.src_addr,
3242 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3243 memcpy(&ip6_hdr.dst_addr,
3244 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3245 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3246 hdr = (uint8_t *)&ip6_hdr;
/* SEC era >= 10 prefers SHR_WAIT sharing for the shared descriptor. */
3249 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3250 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3251 SHR_WAIT : SHR_SERIAL, &encap_pdb,
3252 hdr, &cipherdata, &authdata);
3253 } else if (ipsec_xform->direction ==
3254 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3255 struct ipsec_decap_pdb decap_pdb;
3257 flc->dhr = SEC_FLC_DHR_INBOUND;
3258 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3259 /* copy algo specific data to PDB */
3260 switch (cipherdata.algtype) {
3261 case OP_PCL_IPSEC_AES_CTR:
3262 decap_pdb.ctr.ctr_initial = 0x00000001;
3263 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3265 case OP_PCL_IPSEC_AES_GCM8:
3266 case OP_PCL_IPSEC_AES_GCM12:
3267 case OP_PCL_IPSEC_AES_GCM16:
3268 memcpy(decap_pdb.gcm.salt,
3269 (uint8_t *)&(ipsec_xform->salt), 4);
/* Upper 16 bits of decap options carry the outer-header length. */
3273 decap_pdb.options = (ipsec_xform->tunnel.type ==
3274 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3275 sizeof(struct ip) << 16 :
3276 sizeof(struct rte_ipv6_hdr) << 16;
3277 if (ipsec_xform->options.esn)
3278 decap_pdb.options |= PDBOPTS_ESP_ESN;
/* Anti-replay: window rounded up to a power of two; pre-era-10 HW
 * caps the window at 128 entries.
 */
3280 if (ipsec_xform->replay_win_sz) {
3282 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3284 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3285 DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
3295 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3298 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3301 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3304 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3307 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3311 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3314 session->dir = DIR_DEC;
3315 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3316 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3317 SHR_WAIT : SHR_SERIAL,
3318 &decap_pdb, &cipherdata, &authdata);
3323 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3327 flc->word1_sdl = (uint8_t)bufsize;
3329 /* Enable the stashing control bit */
3330 DPAA2_SET_FLC_RSC(flc);
/* RFLC words point at qp 0's RX queue; low bits 0x14 are stash config. */
3331 flc->word2_rflc_31_0 = lower_32_bits(
3332 (size_t)&(((struct dpaa2_sec_qp *)
3333 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3334 flc->word3_rflc_63_32 = upper_32_bits(
3335 (size_t)&(((struct dpaa2_sec_qp *)
3336 dev->data->queue_pairs[0])->rx_vq));
3338 /* Set EWS bit i.e. enable write-safe */
3339 DPAA2_SET_FLC_EWS(flc);
3340 /* Set BS = 1 i.e reuse input buffers as output buffers */
3341 DPAA2_SET_FLC_REUSE_BS(flc);
3342 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3343 DPAA2_SET_FLC_REUSE_FF(flc);
3345 session->ctxt = priv;
/* Error path: release both key copies. */
3349 rte_free(session->auth_key.data);
3350 rte_free(session->cipher_key.data);
/*
 * dpaa2_sec_set_pdcp_session() - build a PDCP (rte_security) session and
 * its CAAM shared descriptor.
 *
 * Visible flow: identify cipher/auth xforms (either order), copy keys,
 * record the PDCP parameters (domain, bearer, direction, SN size, HFN and
 * threshold, HFN override), map algorithms to PDCP_{CIPHER,AUTH}_TYPE_*,
 * decide key inlining (SDAP and non-SDAP query variants), then construct
 * the matching shared descriptor: c-plane encap/decap, short-MAC, or
 * u-plane encap/decap (with SDAP variants). Finally the flow context's
 * RFLC words are pointed at queue pair 0's RX queue.
 *
 * NOTE(review): extracted with gaps — `swap` appears only under the
 * big-endian #if branch here, and break/goto/closing-brace lines between
 * the embedded original numbers are missing.
 */
3356 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3357 struct rte_security_session_conf *conf,
3360 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3361 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3362 struct rte_crypto_auth_xform *auth_xform = NULL;
3363 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3364 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3365 struct ctxt_priv *priv;
3366 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3367 struct alginfo authdata, cipherdata;
3368 struct alginfo *p_authdata = NULL;
3370 struct sec_flow_context *flc;
3371 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3377 PMD_INIT_FUNC_TRACE();
3379 memset(session, 0, sizeof(dpaa2_sec_session));
3381 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3382 sizeof(struct ctxt_priv) +
3383 sizeof(struct sec_flc_desc),
3384 RTE_CACHE_LINE_SIZE);
3387 DPAA2_SEC_ERR("No memory for priv CTXT");
3391 priv->fle_pool = dev_priv->fle_pool;
3392 flc = &priv->flc_desc[0].flc;
3394 /* find xfrm types */
3395 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3396 cipher_xform = &xform->cipher;
3397 if (xform->next != NULL &&
3398 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3399 session->ext_params.aead_ctxt.auth_cipher_text = true;
3400 auth_xform = &xform->next->auth;
3402 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3403 auth_xform = &xform->auth;
3404 if (xform->next != NULL &&
3405 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3406 session->ext_params.aead_ctxt.auth_cipher_text = false;
3407 cipher_xform = &xform->next->cipher;
3410 DPAA2_SEC_ERR("Invalid crypto type");
3414 session->ctxt_type = DPAA2_SEC_PDCP;
/* Cipher key copy; absence of a cipher xform means NULL ciphering. */
3416 session->cipher_key.data = rte_zmalloc(NULL,
3417 cipher_xform->key.length,
3418 RTE_CACHE_LINE_SIZE);
3419 if (session->cipher_key.data == NULL &&
3420 cipher_xform->key.length > 0) {
3421 DPAA2_SEC_ERR("No Memory for cipher key");
3425 session->cipher_key.length = cipher_xform->key.length;
3426 memcpy(session->cipher_key.data, cipher_xform->key.data,
3427 cipher_xform->key.length);
3429 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3431 session->cipher_alg = cipher_xform->algo;
3433 session->cipher_key.data = NULL;
3434 session->cipher_key.length = 0;
3435 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3436 session->dir = DIR_ENC;
/* Cache all PDCP protocol parameters in the session. */
3439 session->pdcp.domain = pdcp_xform->domain;
3440 session->pdcp.bearer = pdcp_xform->bearer;
3441 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3442 session->pdcp.sn_size = pdcp_xform->sn_size;
3443 session->pdcp.hfn = pdcp_xform->hfn;
3444 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3445 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3446 /* hfv ovd offset location is stored in iv.offset value*/
3448 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3450 cipherdata.key = (size_t)session->cipher_key.data;
3451 cipherdata.keylen = session->cipher_key.length;
3452 cipherdata.key_enc_flags = 0;
3453 cipherdata.key_type = RTA_DATA_IMM;
/* Map cipher algo to the PDCP descriptor cipher type. */
3455 switch (session->cipher_alg) {
3456 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3457 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3459 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3460 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3462 case RTE_CRYPTO_CIPHER_AES_CTR:
3463 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3465 case RTE_CRYPTO_CIPHER_NULL:
3466 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3469 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3470 session->cipher_alg);
/* Auth key copy; auth_alg 0 later means "no integrity configured". */
3475 session->auth_key.data = rte_zmalloc(NULL,
3476 auth_xform->key.length,
3477 RTE_CACHE_LINE_SIZE);
3478 if (!session->auth_key.data &&
3479 auth_xform->key.length > 0) {
3480 DPAA2_SEC_ERR("No Memory for auth key");
3481 rte_free(session->cipher_key.data);
3485 session->auth_key.length = auth_xform->key.length;
3486 memcpy(session->auth_key.data, auth_xform->key.data,
3487 auth_xform->key.length);
3488 session->auth_alg = auth_xform->algo;
3490 session->auth_key.data = NULL;
3491 session->auth_key.length = 0;
3492 session->auth_alg = 0;
3494 authdata.key = (size_t)session->auth_key.data;
3495 authdata.keylen = session->auth_key.length;
3496 authdata.key_enc_flags = 0;
3497 authdata.key_type = RTA_DATA_IMM;
3499 if (session->auth_alg) {
/* Map auth algo to the PDCP descriptor auth type. */
3500 switch (session->auth_alg) {
3501 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3502 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3504 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3505 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3507 case RTE_CRYPTO_AUTH_AES_CMAC:
3508 authdata.algtype = PDCP_AUTH_TYPE_AES;
3510 case RTE_CRYPTO_AUTH_NULL:
3511 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3514 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3519 p_authdata = &authdata;
3520 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
/* 3GPP: control-plane PDCP always carries integrity protection. */
3521 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
/*
 * Key inlining: the SDAP and plain queries report how many of the keys
 * must be referenced by pointer instead of being inlined in the
 * descriptor.
 */
3525 if (pdcp_xform->sdap_enabled) {
3526 int nb_keys_to_inline =
3527 rta_inline_pdcp_sdap_query(authdata.algtype,
3529 session->pdcp.sn_size,
3530 session->pdcp.hfn_ovd);
3531 if (nb_keys_to_inline >= 1) {
3532 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3533 cipherdata.key_type = RTA_DATA_PTR;
3535 if (nb_keys_to_inline >= 2) {
3536 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3537 authdata.key_type = RTA_DATA_PTR;
3540 if (rta_inline_pdcp_query(authdata.algtype,
3542 session->pdcp.sn_size,
3543 session->pdcp.hfn_ovd)) {
3544 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3545 cipherdata.key_type = RTA_DATA_PTR;
/* Shared descriptor selection by PDCP domain and direction. */
3549 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3550 if (session->dir == DIR_ENC)
3551 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3552 priv->flc_desc[0].desc, 1, swap,
3554 session->pdcp.sn_size,
3556 pdcp_xform->pkt_dir,
3557 pdcp_xform->hfn_threshold,
3558 &cipherdata, &authdata);
3559 else if (session->dir == DIR_DEC)
3560 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3561 priv->flc_desc[0].desc, 1, swap,
3563 session->pdcp.sn_size,
3565 pdcp_xform->pkt_dir,
3566 pdcp_xform->hfn_threshold,
3567 &cipherdata, &authdata);
3569 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3570 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3571 1, swap, &authdata);
/* User-plane: SDAP and non-SDAP descriptor constructors per direction. */
3573 if (session->dir == DIR_ENC) {
3574 if (pdcp_xform->sdap_enabled)
3575 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3576 priv->flc_desc[0].desc, 1, swap,
3577 session->pdcp.sn_size,
3580 pdcp_xform->pkt_dir,
3581 pdcp_xform->hfn_threshold,
3582 &cipherdata, p_authdata);
3584 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3585 priv->flc_desc[0].desc, 1, swap,
3586 session->pdcp.sn_size,
3589 pdcp_xform->pkt_dir,
3590 pdcp_xform->hfn_threshold,
3591 &cipherdata, p_authdata);
3592 } else if (session->dir == DIR_DEC) {
3593 if (pdcp_xform->sdap_enabled)
3594 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3595 priv->flc_desc[0].desc, 1, swap,
3596 session->pdcp.sn_size,
3599 pdcp_xform->pkt_dir,
3600 pdcp_xform->hfn_threshold,
3601 &cipherdata, p_authdata);
3603 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3604 priv->flc_desc[0].desc, 1, swap,
3605 session->pdcp.sn_size,
3608 pdcp_xform->pkt_dir,
3609 pdcp_xform->hfn_threshold,
3610 &cipherdata, p_authdata);
3615 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3619 /* Enable the stashing control bit */
3620 DPAA2_SET_FLC_RSC(flc);
/* RFLC words point at qp 0's RX queue; low bits 0x14 are stash config. */
3621 flc->word2_rflc_31_0 = lower_32_bits(
3622 (size_t)&(((struct dpaa2_sec_qp *)
3623 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3624 flc->word3_rflc_63_32 = upper_32_bits(
3625 (size_t)&(((struct dpaa2_sec_qp *)
3626 dev->data->queue_pairs[0])->rx_vq));
3628 flc->word1_sdl = (uint8_t)bufsize;
3630 /* TODO - check the perf impact or
3631 * align as per descriptor type
3632 * Set EWS bit i.e. enable write-safe
3633 * DPAA2_SET_FLC_EWS(flc);
3636 /* Set BS = 1 i.e reuse input buffers as output buffers */
3637 DPAA2_SET_FLC_REUSE_BS(flc);
3638 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3639 DPAA2_SET_FLC_REUSE_FF(flc);
3641 session->ctxt = priv;
/* Error path: release both key copies. */
3645 rte_free(session->auth_key.data);
3646 rte_free(session->cipher_key.data);
3652 dpaa2_sec_security_session_create(void *dev,
3653 struct rte_security_session_conf *conf,
3654 struct rte_security_session *sess,
3655 struct rte_mempool *mempool)
3657 void *sess_private_data;
3658 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3661 if (rte_mempool_get(mempool, &sess_private_data)) {
3662 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3666 switch (conf->protocol) {
3667 case RTE_SECURITY_PROTOCOL_IPSEC:
3668 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3671 case RTE_SECURITY_PROTOCOL_MACSEC:
3673 case RTE_SECURITY_PROTOCOL_PDCP:
3674 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3681 DPAA2_SEC_ERR("Failed to configure session parameters");
3682 /* Return session to mempool */
3683 rte_mempool_put(mempool, sess_private_data);
3687 set_sec_session_private_data(sess, sess_private_data);
3692 /** Clear the memory of session so it doesn't leave key material behind */
3694 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3695 struct rte_security_session *sess)
3697 PMD_INIT_FUNC_TRACE();
3698 void *sess_priv = get_sec_session_private_data(sess);
3700 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3703 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3706 rte_free(s->cipher_key.data);
3707 rte_free(s->auth_key.data);
3708 memset(s, 0, sizeof(dpaa2_sec_session));
3709 set_sec_session_private_data(sess, NULL);
3710 rte_mempool_put(sess_mp, sess_priv);
3716 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3717 struct rte_crypto_sym_xform *xform,
3718 struct rte_cryptodev_sym_session *sess,
3719 struct rte_mempool *mempool)
3721 void *sess_private_data;
3724 if (rte_mempool_get(mempool, &sess_private_data)) {
3725 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3729 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3731 DPAA2_SEC_ERR("Failed to configure session parameters");
3732 /* Return session to mempool */
3733 rte_mempool_put(mempool, sess_private_data);
3737 set_sym_session_private_data(sess, dev->driver_id,
3743 /** Clear the memory of session so it doesn't leave key material behind */
3745 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3746 struct rte_cryptodev_sym_session *sess)
3748 PMD_INIT_FUNC_TRACE();
3749 uint8_t index = dev->driver_id;
3750 void *sess_priv = get_sym_session_private_data(sess, index);
3751 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3755 rte_free(s->cipher_key.data);
3756 rte_free(s->auth_key.data);
3757 memset(s, 0, sizeof(dpaa2_sec_session));
3758 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3759 set_sym_session_private_data(sess, index, NULL);
3760 rte_mempool_put(sess_mp, sess_priv);
3765 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3766 struct rte_cryptodev_config *config __rte_unused)
3768 PMD_INIT_FUNC_TRACE();
3774 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3776 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3777 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3778 struct dpseci_attr attr;
3779 struct dpaa2_queue *dpaa2_q;
3780 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3781 dev->data->queue_pairs;
3782 struct dpseci_rx_queue_attr rx_attr;
3783 struct dpseci_tx_queue_attr tx_attr;
3786 PMD_INIT_FUNC_TRACE();
3788 /* Change the tx burst function if ordered queues are used */
3789 if (priv->en_ordered)
3790 dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
3792 memset(&attr, 0, sizeof(struct dpseci_attr));
3794 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3796 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3798 goto get_attr_failure;
3800 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3802 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3803 goto get_attr_failure;
3805 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3806 dpaa2_q = &qp[i]->rx_vq;
3807 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3809 dpaa2_q->fqid = rx_attr.fqid;
3810 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3812 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3813 dpaa2_q = &qp[i]->tx_vq;
3814 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3816 dpaa2_q->fqid = tx_attr.fqid;
3817 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3822 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3827 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3829 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3830 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3833 PMD_INIT_FUNC_TRACE();
3835 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3837 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3842 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3844 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3850 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3852 PMD_INIT_FUNC_TRACE();
3858 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3859 struct rte_cryptodev_info *info)
3861 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3863 PMD_INIT_FUNC_TRACE();
3865 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3866 info->feature_flags = dev->feature_flags;
3867 info->capabilities = dpaa2_sec_capabilities;
3868 /* No limit of number of sessions */
3869 info->sym.max_nb_sessions = 0;
3870 info->driver_id = cryptodev_driver_id;
3875 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3876 struct rte_cryptodev_stats *stats)
3878 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3879 struct fsl_mc_io dpseci;
3880 struct dpseci_sec_counters counters = {0};
3881 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3882 dev->data->queue_pairs;
3885 PMD_INIT_FUNC_TRACE();
3886 if (stats == NULL) {
3887 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3890 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3891 if (qp == NULL || qp[i] == NULL) {
3892 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3896 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3897 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3898 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3899 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3902 /* In case as secondary process access stats, MCP portal in priv-hw
3903 * may have primary process address. Need the secondary process
3904 * based MCP portal address for this object.
3906 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3907 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3910 DPAA2_SEC_ERR("SEC counters failed");
3912 DPAA2_SEC_INFO("dpseci hardware stats:"
3913 "\n\tNum of Requests Dequeued = %" PRIu64
3914 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3915 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3916 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3917 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3918 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3919 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3920 counters.dequeued_requests,
3921 counters.ob_enc_requests,
3922 counters.ib_dec_requests,
3923 counters.ob_enc_bytes,
3924 counters.ob_prot_bytes,
3925 counters.ib_dec_bytes,
3926 counters.ib_valid_bytes);
3931 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3934 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3935 (dev->data->queue_pairs);
3937 PMD_INIT_FUNC_TRACE();
3939 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3940 if (qp[i] == NULL) {
3941 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3944 qp[i]->tx_vq.rx_pkts = 0;
3945 qp[i]->tx_vq.tx_pkts = 0;
3946 qp[i]->tx_vq.err_pkts = 0;
3947 qp[i]->rx_vq.rx_pkts = 0;
3948 qp[i]->rx_vq.tx_pkts = 0;
3949 qp[i]->rx_vq.err_pkts = 0;
3953 static void __rte_hot
3954 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3955 const struct qbman_fd *fd,
3956 const struct qbman_result *dq,
3957 struct dpaa2_queue *rxq,
3958 struct rte_event *ev)
3960 /* Prefetching mbuf */
3961 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3962 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3964 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3965 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3967 ev->flow_id = rxq->ev.flow_id;
3968 ev->sub_event_type = rxq->ev.sub_event_type;
3969 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3970 ev->op = RTE_EVENT_OP_NEW;
3971 ev->sched_type = rxq->ev.sched_type;
3972 ev->queue_id = rxq->ev.queue_id;
3973 ev->priority = rxq->ev.priority;
3974 ev->event_ptr = sec_fd_to_mbuf(fd);
3976 qbman_swp_dqrr_consume(swp, dq);
3979 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3980 const struct qbman_fd *fd,
3981 const struct qbman_result *dq,
3982 struct dpaa2_queue *rxq,
3983 struct rte_event *ev)
3986 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3987 /* Prefetching mbuf */
3988 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3989 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3991 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3992 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3994 ev->flow_id = rxq->ev.flow_id;
3995 ev->sub_event_type = rxq->ev.sub_event_type;
3996 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3997 ev->op = RTE_EVENT_OP_NEW;
3998 ev->sched_type = rxq->ev.sched_type;
3999 ev->queue_id = rxq->ev.queue_id;
4000 ev->priority = rxq->ev.priority;
4002 ev->event_ptr = sec_fd_to_mbuf(fd);
4003 dqrr_index = qbman_get_dqrr_idx(dq);
4004 *dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
4005 DPAA2_PER_LCORE_DQRR_SIZE++;
4006 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
4007 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
4010 static void __rte_hot
4011 dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
4012 const struct qbman_fd *fd,
4013 const struct qbman_result *dq,
4014 struct dpaa2_queue *rxq,
4015 struct rte_event *ev)
4017 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
4019 /* Prefetching mbuf */
4020 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4021 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4023 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
4024 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4026 ev->flow_id = rxq->ev.flow_id;
4027 ev->sub_event_type = rxq->ev.sub_event_type;
4028 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4029 ev->op = RTE_EVENT_OP_NEW;
4030 ev->sched_type = rxq->ev.sched_type;
4031 ev->queue_id = rxq->ev.queue_id;
4032 ev->priority = rxq->ev.priority;
4033 ev->event_ptr = sec_fd_to_mbuf(fd);
4035 *dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
4036 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
4037 DPAA2_EQCR_OPRID_SHIFT;
4038 *dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
4039 DPAA2_EQCR_SEQNUM_SHIFT;
4041 qbman_swp_dqrr_consume(swp, dq);
4045 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
4047 struct dpaa2_dpcon_dev *dpcon,
4048 const struct rte_event *event)
4050 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4051 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4052 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
4053 struct dpseci_rx_queue_cfg cfg;
4057 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
4058 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
4059 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
4060 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
4061 else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
4062 qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
4066 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
4067 (dpcon->num_priorities - 1);
4069 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4070 cfg.options = DPSECI_QUEUE_OPT_DEST;
4071 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
4072 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
4073 cfg.dest_cfg.priority = priority;
4075 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
4076 cfg.user_ctx = (size_t)(qp);
4077 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
4078 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
4079 cfg.order_preservation_en = 1;
4082 if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
4083 struct opr_cfg ocfg;
4085 /* Restoration window size = 256 frames */
4087 /* Restoration window size = 512 frames for LX2 */
4088 if (dpaa2_svr_family == SVR_LX2160A)
4090 /* Auto advance NESN window enabled */
4092 /* Late arrival window size disabled */
4094 /* ORL resource exhaustaion advance NESN disabled */
4097 if (priv->en_loose_ordered)
4102 ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
4103 qp_id, OPR_OPT_CREATE, &ocfg);
4105 RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
4108 qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
4109 priv->en_ordered = 1;
4112 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4115 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4119 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
4125 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4128 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4129 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4130 struct dpseci_rx_queue_cfg cfg;
4133 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4134 cfg.options = DPSECI_QUEUE_OPT_DEST;
4135 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4137 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4140 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4145 static struct rte_cryptodev_ops crypto_ops = {
4146 .dev_configure = dpaa2_sec_dev_configure,
4147 .dev_start = dpaa2_sec_dev_start,
4148 .dev_stop = dpaa2_sec_dev_stop,
4149 .dev_close = dpaa2_sec_dev_close,
4150 .dev_infos_get = dpaa2_sec_dev_infos_get,
4151 .stats_get = dpaa2_sec_stats_get,
4152 .stats_reset = dpaa2_sec_stats_reset,
4153 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
4154 .queue_pair_release = dpaa2_sec_queue_pair_release,
4155 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
4156 .sym_session_configure = dpaa2_sec_sym_session_configure,
4157 .sym_session_clear = dpaa2_sec_sym_session_clear,
4158 /* Raw data-path API related operations */
4159 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
4160 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
4163 #ifdef RTE_LIB_SECURITY
4164 static const struct rte_security_capability *
4165 dpaa2_sec_capabilities_get(void *device __rte_unused)
4167 return dpaa2_sec_security_cap;
4170 static const struct rte_security_ops dpaa2_sec_security_ops = {
4171 .session_create = dpaa2_sec_security_session_create,
4172 .session_update = NULL,
4173 .session_stats_get = NULL,
4174 .session_destroy = dpaa2_sec_security_session_destroy,
4175 .set_pkt_metadata = NULL,
4176 .capabilities_get = dpaa2_sec_capabilities_get
4181 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4183 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4184 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4187 PMD_INIT_FUNC_TRACE();
4189 /* Function is reverse of dpaa2_sec_dev_init.
4190 * It does the following:
4191 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
4192 * 2. Close the DPSECI device
4193 * 3. Free the allocated resources.
4196 /*Close the device at underlying layer*/
4197 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4199 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4203 /*Free the allocated memory for ethernet private data and dpseci*/
4206 rte_free(dev->security_ctx);
4207 rte_mempool_free(priv->fle_pool);
4209 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4210 dev->data->name, rte_socket_id());
4216 check_devargs_handler(const char *key, const char *value,
4219 struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4220 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4222 if (!strcmp(key, "drv_strict_order")) {
4223 priv->en_loose_ordered = false;
4224 } else if (!strcmp(key, "drv_dump_mode")) {
4225 dpaa2_sec_dp_dump = atoi(value);
4226 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4227 DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
4228 "supported, changing to FULL error"
4230 dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4239 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4241 struct rte_kvargs *kvlist;
4242 struct rte_devargs *devargs;
4244 devargs = cryptodev->device->devargs;
4248 kvlist = rte_kvargs_parse(devargs->args, NULL);
4252 if (!rte_kvargs_count(kvlist, key)) {
4253 rte_kvargs_free(kvlist);
4257 rte_kvargs_process(kvlist, key,
4258 check_devargs_handler, (void *)cryptodev);
4259 rte_kvargs_free(kvlist);
4263 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4265 struct dpaa2_sec_dev_private *internals;
4266 struct rte_device *dev = cryptodev->device;
4267 struct rte_dpaa2_device *dpaa2_dev;
4268 #ifdef RTE_LIB_SECURITY
4269 struct rte_security_ctx *security_instance;
4271 struct fsl_mc_io *dpseci;
4273 struct dpseci_attr attr;
4277 PMD_INIT_FUNC_TRACE();
4278 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4279 hw_id = dpaa2_dev->object_id;
4281 cryptodev->driver_id = cryptodev_driver_id;
4282 cryptodev->dev_ops = &crypto_ops;
4284 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4285 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4286 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4287 RTE_CRYPTODEV_FF_HW_ACCELERATED |
4288 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4289 RTE_CRYPTODEV_FF_SECURITY |
4290 RTE_CRYPTODEV_FF_SYM_RAW_DP |
4291 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4292 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4293 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4294 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4295 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4297 internals = cryptodev->data->dev_private;
4300 * For secondary processes, we don't initialise any further as primary
4301 * has already done this work. Only check we don't need a different
4304 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4305 DPAA2_SEC_DEBUG("Device already init by primary process");
4308 #ifdef RTE_LIB_SECURITY
4309 /* Initialize security_ctx only for primary process*/
4310 security_instance = rte_malloc("rte_security_instances_ops",
4311 sizeof(struct rte_security_ctx), 0);
4312 if (security_instance == NULL)
4314 security_instance->device = (void *)cryptodev;
4315 security_instance->ops = &dpaa2_sec_security_ops;
4316 security_instance->sess_cnt = 0;
4317 cryptodev->security_ctx = security_instance;
4319 /*Open the rte device via MC and save the handle for further use*/
4320 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4321 sizeof(struct fsl_mc_io), 0);
4324 "Error in allocating the memory for dpsec object");
4327 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4329 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4331 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4335 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4338 "Cannot get dpsec device attributed: Error = %x",
4342 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4345 internals->max_nb_queue_pairs = attr.num_tx_queues;
4346 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4347 internals->hw = dpseci;
4348 internals->token = token;
4349 internals->en_loose_ordered = true;
4351 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
4352 getpid(), cryptodev->data->dev_id);
4353 internals->fle_pool = rte_mempool_create((const char *)str,
4356 FLE_POOL_CACHE_SIZE, 0,
4357 NULL, NULL, NULL, NULL,
4359 if (!internals->fle_pool) {
4360 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
4364 dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4365 dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4366 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4370 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4372 /* dpaa2_sec_uninit(crypto_dev_name); */
4377 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4378 struct rte_dpaa2_device *dpaa2_dev)
4380 struct rte_cryptodev *cryptodev;
4381 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4385 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4386 dpaa2_dev->object_id);
4388 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4389 if (cryptodev == NULL)
4392 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4393 cryptodev->data->dev_private = rte_zmalloc_socket(
4394 "cryptodev private structure",
4395 sizeof(struct dpaa2_sec_dev_private),
4396 RTE_CACHE_LINE_SIZE,
4399 if (cryptodev->data->dev_private == NULL)
4400 rte_panic("Cannot allocate memzone for private "
4404 dpaa2_dev->cryptodev = cryptodev;
4405 cryptodev->device = &dpaa2_dev->device;
4407 /* init user callbacks */
4408 TAILQ_INIT(&(cryptodev->link_intr_cbs));
4410 if (dpaa2_svr_family == SVR_LX2160A)
4411 rta_set_sec_era(RTA_SEC_ERA_10);
4413 rta_set_sec_era(RTA_SEC_ERA_8);
4415 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
4417 /* Invoke PMD device initialization function */
4418 retval = dpaa2_sec_dev_init(cryptodev);
4420 rte_cryptodev_pmd_probing_finish(cryptodev);
4424 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4425 rte_free(cryptodev->data->dev_private);
4427 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4433 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4435 struct rte_cryptodev *cryptodev;
4438 cryptodev = dpaa2_dev->cryptodev;
4439 if (cryptodev == NULL)
4442 ret = dpaa2_sec_uninit(cryptodev);
4446 return rte_cryptodev_pmd_destroy(cryptodev);
4449 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4450 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4451 .drv_type = DPAA2_CRYPTO,
4453 .name = "DPAA2 SEC PMD"
4455 .probe = cryptodev_dpaa2_sec_probe,
4456 .remove = cryptodev_dpaa2_sec_remove,
4459 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4461 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4462 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4463 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4464 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
4465 DRIVER_STRICT_ORDER "=<int>"
4466 DRIVER_DUMP_MODE "=<int>");
4467 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);