1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/sdap.h>
40 #include <desc/algo.h>
42 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
43 * a pointer to the shared descriptor
45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
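/* With 4-byte CAAM command words and an 8-byte pointer (a typical 64-bit
 * build), this works out to 12 bytes; the exact value follows CAAM_CMD_SZ
 * and CAAM_PTR_SZ for the platform.
 */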
46 #define FSL_VENDOR_ID 0x1957
47 #define FSL_DEVICE_ID 0x410
48 #define FSL_SUBSYSTEM_SEC 1
49 #define FSL_MC_DPSECI_DEVID 3
53 uint8_t cryptodev_driver_id;
55 #ifdef RTE_LIB_SECURITY
57 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
58 struct rte_crypto_op *op,
59 struct qbman_fd *fd, uint16_t bpid)
61 struct rte_crypto_sym_op *sym_op = op->sym;
62 struct ctxt_priv *priv = sess->ctxt;
63 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
64 struct sec_flow_context *flc;
65 struct rte_mbuf *mbuf;
66 uint32_t in_len = 0, out_len = 0;
73 /* first FLE entry used to store mbuf and session ctxt */
74 fle = (struct qbman_fle *)rte_malloc(NULL,
75 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
78 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
81 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
82 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
83 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
85 /* Save the shared descriptor */
86 flc = &priv->flc_desc[0].flc;
92 if (likely(bpid < MAX_BPID)) {
93 DPAA2_SET_FD_BPID(fd, bpid);
94 DPAA2_SET_FLE_BPID(op_fle, bpid);
95 DPAA2_SET_FLE_BPID(ip_fle, bpid);
98 DPAA2_SET_FLE_IVP(op_fle);
99 DPAA2_SET_FLE_IVP(ip_fle);
102 /* Configure FD as a FRAME LIST */
103 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
104 DPAA2_SET_FD_COMPOUND_FMT(fd);
105 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
107 /* Configure Output FLE with Scatter/Gather Entry */
108 DPAA2_SET_FLE_SG_EXT(op_fle);
109 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
111 /* Configure Output SGE for Encap/Decap */
112 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
113 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
116 sge->length = mbuf->data_len;
117 out_len += sge->length;
120 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
121 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
123 /* using buf_len for last buf - so that extra data can be added */
124 sge->length = mbuf->buf_len - mbuf->data_off;
125 out_len += sge->length;
127 DPAA2_SET_FLE_FIN(sge);
128 op_fle->length = out_len;
131 mbuf = sym_op->m_src;
133 /* Configure Input FLE with Scatter/Gather Entry */
134 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
135 DPAA2_SET_FLE_SG_EXT(ip_fle);
136 DPAA2_SET_FLE_FIN(ip_fle);
138 /* Configure input SGE for Encap/Decap */
139 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
140 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
141 sge->length = mbuf->data_len;
142 in_len += sge->length;
148 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
149 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
150 sge->length = mbuf->data_len;
151 in_len += sge->length;
154 ip_fle->length = in_len;
155 DPAA2_SET_FLE_FIN(sge);
157 /* In case of PDCP, per packet HFN is stored in
158 * mbuf priv after sym_op.
160 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
161 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
162 sess->pdcp.hfn_ovd_offset);
163 /* enable HFN override */
164 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
165 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
166 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
168 DPAA2_SET_FD_LEN(fd, ip_fle->length);
174 build_proto_compound_fd(dpaa2_sec_session *sess,
175 struct rte_crypto_op *op,
176 struct qbman_fd *fd, uint16_t bpid)
178 struct rte_crypto_sym_op *sym_op = op->sym;
179 struct ctxt_priv *priv = sess->ctxt;
180 struct qbman_fle *fle, *ip_fle, *op_fle;
181 struct sec_flow_context *flc;
182 struct rte_mbuf *src_mbuf = sym_op->m_src;
183 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
189 /* Save the shared descriptor */
190 flc = &priv->flc_desc[0].flc;
192 /* we are using the first FLE entry to store Mbuf */
193 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
195 DPAA2_SEC_DP_ERR("Memory alloc failed");
198 memset(fle, 0, FLE_POOL_BUF_SIZE);
199 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
200 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
205 if (likely(bpid < MAX_BPID)) {
206 DPAA2_SET_FD_BPID(fd, bpid);
207 DPAA2_SET_FLE_BPID(op_fle, bpid);
208 DPAA2_SET_FLE_BPID(ip_fle, bpid);
210 DPAA2_SET_FD_IVP(fd);
211 DPAA2_SET_FLE_IVP(op_fle);
212 DPAA2_SET_FLE_IVP(ip_fle);
215 /* Configure FD as a FRAME LIST */
216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
217 DPAA2_SET_FD_COMPOUND_FMT(fd);
218 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
220 /* Configure Output FLE with dst mbuf data */
221 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
222 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
223 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
225 /* Configure Input FLE with src mbuf data */
226 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
227 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
228 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
230 DPAA2_SET_FD_LEN(fd, ip_fle->length);
231 DPAA2_SET_FLE_FIN(ip_fle);
233 /* In case of PDCP, per packet HFN is stored in
234 * mbuf priv after sym_op.
236 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
237 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
238 sess->pdcp.hfn_ovd_offset);
239 /* enable HFN override */
240 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
241 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
242 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
250 build_proto_fd(dpaa2_sec_session *sess,
251 struct rte_crypto_op *op,
252 struct qbman_fd *fd, uint16_t bpid)
254 struct rte_crypto_sym_op *sym_op = op->sym;
256 return build_proto_compound_fd(sess, op, fd, bpid);
258 struct ctxt_priv *priv = sess->ctxt;
259 struct sec_flow_context *flc;
260 struct rte_mbuf *mbuf = sym_op->m_src;
262 if (likely(bpid < MAX_BPID))
263 DPAA2_SET_FD_BPID(fd, bpid);
265 DPAA2_SET_FD_IVP(fd);
267 /* Save the shared descriptor */
268 flc = &priv->flc_desc[0].flc;
270 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
271 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
272 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
273 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
275 /* save physical address of mbuf */
276 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
277 mbuf->buf_iova = (size_t)op;
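/* The op pointer is parked in buf_iova (its real IOVA was saved in
 * aead.digest.phys_addr above) so that sec_simple_fd_to_mbuf() can recover
 * the op and restore buf_iova when the frame returns from SEC.
 */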
284 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
285 struct rte_crypto_op *op,
286 struct qbman_fd *fd, __rte_unused uint16_t bpid)
288 struct rte_crypto_sym_op *sym_op = op->sym;
289 struct ctxt_priv *priv = sess->ctxt;
290 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
291 struct sec_flow_context *flc;
292 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
293 int icv_len = sess->digest_length;
295 struct rte_mbuf *mbuf;
296 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
300 mbuf = sym_op->m_dst;
302 mbuf = sym_op->m_src;
304 /* first FLE entry used to store mbuf and session ctxt */
305 fle = (struct qbman_fle *)rte_malloc(NULL,
306 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
307 RTE_CACHE_LINE_SIZE);
308 if (unlikely(!fle)) {
309 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
312 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
313 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
314 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
320 /* Save the shared descriptor */
321 flc = &priv->flc_desc[0].flc;
323 /* Configure FD as a FRAME LIST */
324 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
325 DPAA2_SET_FD_COMPOUND_FMT(fd);
326 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
328 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
329 "iv-len=%d data_off: 0x%x\n",
330 sym_op->aead.data.offset,
331 sym_op->aead.data.length,
334 sym_op->m_src->data_off);
336 /* Configure Output FLE with Scatter/Gather Entry */
337 DPAA2_SET_FLE_SG_EXT(op_fle);
338 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
341 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
343 op_fle->length = (sess->dir == DIR_ENC) ?
344 (sym_op->aead.data.length + icv_len) :
345 sym_op->aead.data.length;
347 /* Configure Output SGE for Encap/Decap */
348 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
349 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
350 sge->length = mbuf->data_len - sym_op->aead.data.offset;
356 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
357 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
358 sge->length = mbuf->data_len;
361 sge->length -= icv_len;
363 if (sess->dir == DIR_ENC) {
365 DPAA2_SET_FLE_ADDR(sge,
366 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
367 sge->length = icv_len;
369 DPAA2_SET_FLE_FIN(sge);
372 mbuf = sym_op->m_src;
374 /* Configure Input FLE with Scatter/Gather Entry */
375 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
376 DPAA2_SET_FLE_SG_EXT(ip_fle);
377 DPAA2_SET_FLE_FIN(ip_fle);
378 ip_fle->length = (sess->dir == DIR_ENC) ?
379 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
380 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
383 /* Configure Input SGE for Encap/Decap */
384 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
385 sge->length = sess->iv.length;
389 DPAA2_SET_FLE_ADDR(sge,
390 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
391 sge->length = auth_only_len;
395 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
396 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
398 sge->length = mbuf->data_len - sym_op->aead.data.offset;
404 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
405 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
406 sge->length = mbuf->data_len;
410 if (sess->dir == DIR_DEC) {
412 old_icv = (uint8_t *)(sge + 1);
413 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
414 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
415 sge->length = icv_len;
418 DPAA2_SET_FLE_FIN(sge);
420 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
421 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
423 DPAA2_SET_FD_LEN(fd, ip_fle->length);
429 build_authenc_gcm_fd(dpaa2_sec_session *sess,
430 struct rte_crypto_op *op,
431 struct qbman_fd *fd, uint16_t bpid)
433 struct rte_crypto_sym_op *sym_op = op->sym;
434 struct ctxt_priv *priv = sess->ctxt;
435 struct qbman_fle *fle, *sge;
436 struct sec_flow_context *flc;
437 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
438 int icv_len = sess->digest_length, retval;
440 struct rte_mbuf *dst;
441 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
449 /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
450 * Currently we do not know which FLE has the mbuf stored.
451 * So while retrieving we can go back 1 FLE from the FD ADDR
452 * to get the MBUF addr from the previous FLE.
453 * A better approach would be to use the inline Mbuf
455 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
457 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
460 memset(fle, 0, FLE_POOL_BUF_SIZE);
461 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
462 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
465 if (likely(bpid < MAX_BPID)) {
466 DPAA2_SET_FD_BPID(fd, bpid);
467 DPAA2_SET_FLE_BPID(fle, bpid);
468 DPAA2_SET_FLE_BPID(fle + 1, bpid);
469 DPAA2_SET_FLE_BPID(sge, bpid);
470 DPAA2_SET_FLE_BPID(sge + 1, bpid);
471 DPAA2_SET_FLE_BPID(sge + 2, bpid);
472 DPAA2_SET_FLE_BPID(sge + 3, bpid);
474 DPAA2_SET_FD_IVP(fd);
475 DPAA2_SET_FLE_IVP(fle);
476 DPAA2_SET_FLE_IVP((fle + 1));
477 DPAA2_SET_FLE_IVP(sge);
478 DPAA2_SET_FLE_IVP((sge + 1));
479 DPAA2_SET_FLE_IVP((sge + 2));
480 DPAA2_SET_FLE_IVP((sge + 3));
483 /* Save the shared descriptor */
484 flc = &priv->flc_desc[0].flc;
485 /* Configure FD as a FRAME LIST */
486 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
487 DPAA2_SET_FD_COMPOUND_FMT(fd);
488 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
490 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
491 "iv-len=%d data_off: 0x%x\n",
492 sym_op->aead.data.offset,
493 sym_op->aead.data.length,
496 sym_op->m_src->data_off);
498 /* Configure Output FLE with Scatter/Gather Entry */
499 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
501 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
502 fle->length = (sess->dir == DIR_ENC) ?
503 (sym_op->aead.data.length + icv_len) :
504 sym_op->aead.data.length;
506 DPAA2_SET_FLE_SG_EXT(fle);
508 /* Configure Output SGE for Encap/Decap */
509 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
510 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
511 sge->length = sym_op->aead.data.length;
513 if (sess->dir == DIR_ENC) {
515 DPAA2_SET_FLE_ADDR(sge,
516 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
517 sge->length = sess->digest_length;
519 DPAA2_SET_FLE_FIN(sge);
524 /* Configure Input FLE with Scatter/Gather Entry */
525 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
526 DPAA2_SET_FLE_SG_EXT(fle);
527 DPAA2_SET_FLE_FIN(fle);
528 fle->length = (sess->dir == DIR_ENC) ?
529 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
530 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
531 sess->digest_length);
533 /* Configure Input SGE for Encap/Decap */
534 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
535 sge->length = sess->iv.length;
538 DPAA2_SET_FLE_ADDR(sge,
539 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
540 sge->length = auth_only_len;
541 DPAA2_SET_FLE_BPID(sge, bpid);
545 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
546 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
547 sym_op->m_src->data_off);
548 sge->length = sym_op->aead.data.length;
549 if (sess->dir == DIR_DEC) {
551 old_icv = (uint8_t *)(sge + 1);
552 memcpy(old_icv, sym_op->aead.digest.data,
553 sess->digest_length);
554 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
555 sge->length = sess->digest_length;
557 DPAA2_SET_FLE_FIN(sge);
560 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
561 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
564 DPAA2_SET_FD_LEN(fd, fle->length);
569 build_authenc_sg_fd(dpaa2_sec_session *sess,
570 struct rte_crypto_op *op,
571 struct qbman_fd *fd, __rte_unused uint16_t bpid)
573 struct rte_crypto_sym_op *sym_op = op->sym;
574 struct ctxt_priv *priv = sess->ctxt;
575 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
576 struct sec_flow_context *flc;
577 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
578 sym_op->auth.data.offset;
579 uint16_t auth_tail_len = sym_op->auth.data.length -
580 sym_op->cipher.data.length - auth_hdr_len;
581 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
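/* auth_only_len packs the authenticate-only regions surrounding the cipher
 * data: header bytes before the cipher offset in the low 16 bits and
 * trailing bytes after it in the high 16 bits.
 */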
582 int icv_len = sess->digest_length;
584 struct rte_mbuf *mbuf;
585 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
589 mbuf = sym_op->m_dst;
591 mbuf = sym_op->m_src;
593 /* first FLE entry used to store mbuf and session ctxt */
594 fle = (struct qbman_fle *)rte_malloc(NULL,
595 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
596 RTE_CACHE_LINE_SIZE);
597 if (unlikely(!fle)) {
598 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
601 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
602 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
603 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
609 /* Save the shared descriptor */
610 flc = &priv->flc_desc[0].flc;
612 /* Configure FD as a FRAME LIST */
613 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
614 DPAA2_SET_FD_COMPOUND_FMT(fd);
615 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
618 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
619 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
620 sym_op->auth.data.offset,
621 sym_op->auth.data.length,
623 sym_op->cipher.data.offset,
624 sym_op->cipher.data.length,
626 sym_op->m_src->data_off);
628 /* Configure Output FLE with Scatter/Gather Entry */
629 DPAA2_SET_FLE_SG_EXT(op_fle);
630 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
633 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
635 op_fle->length = (sess->dir == DIR_ENC) ?
636 (sym_op->cipher.data.length + icv_len) :
637 sym_op->cipher.data.length;
639 /* Configure Output SGE for Encap/Decap */
640 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
641 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
642 sge->length = mbuf->data_len - sym_op->auth.data.offset;
648 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
649 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
650 sge->length = mbuf->data_len;
653 sge->length -= icv_len;
655 if (sess->dir == DIR_ENC) {
657 DPAA2_SET_FLE_ADDR(sge,
658 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
659 sge->length = icv_len;
661 DPAA2_SET_FLE_FIN(sge);
664 mbuf = sym_op->m_src;
666 /* Configure Input FLE with Scatter/Gather Entry */
667 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
668 DPAA2_SET_FLE_SG_EXT(ip_fle);
669 DPAA2_SET_FLE_FIN(ip_fle);
670 ip_fle->length = (sess->dir == DIR_ENC) ?
671 (sym_op->auth.data.length + sess->iv.length) :
672 (sym_op->auth.data.length + sess->iv.length +
675 /* Configure Input SGE for Encap/Decap */
676 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
677 sge->length = sess->iv.length;
680 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
681 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
683 sge->length = mbuf->data_len - sym_op->auth.data.offset;
689 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
690 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
691 sge->length = mbuf->data_len;
694 sge->length -= icv_len;
696 if (sess->dir == DIR_DEC) {
698 old_icv = (uint8_t *)(sge + 1);
699 memcpy(old_icv, sym_op->auth.digest.data,
701 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
702 sge->length = icv_len;
705 DPAA2_SET_FLE_FIN(sge);
707 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
708 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
710 DPAA2_SET_FD_LEN(fd, ip_fle->length);
716 build_authenc_fd(dpaa2_sec_session *sess,
717 struct rte_crypto_op *op,
718 struct qbman_fd *fd, uint16_t bpid)
720 struct rte_crypto_sym_op *sym_op = op->sym;
721 struct ctxt_priv *priv = sess->ctxt;
722 struct qbman_fle *fle, *sge;
723 struct sec_flow_context *flc;
724 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
725 sym_op->auth.data.offset;
726 uint16_t auth_tail_len = sym_op->auth.data.length -
727 sym_op->cipher.data.length - auth_hdr_len;
728 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
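/* Same packing as the SG path: auth-only header length in the low 16 bits,
 * auth-only tail length in the high 16 bits.
 */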
730 int icv_len = sess->digest_length, retval;
732 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
734 struct rte_mbuf *dst;
741 /* we are using the first FLE entry to store Mbuf.
742 * Currently we do not know which FLE has the mbuf stored.
743 * So while retrieving we can go back 1 FLE from the FD ADDR
744 * to get the MBUF addr from the previous FLE.
745 * A better approach would be to use the inline Mbuf
747 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
749 DPAA2_SEC_ERR("Memory alloc failed for SGE");
752 memset(fle, 0, FLE_POOL_BUF_SIZE);
753 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
754 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
757 if (likely(bpid < MAX_BPID)) {
758 DPAA2_SET_FD_BPID(fd, bpid);
759 DPAA2_SET_FLE_BPID(fle, bpid);
760 DPAA2_SET_FLE_BPID(fle + 1, bpid);
761 DPAA2_SET_FLE_BPID(sge, bpid);
762 DPAA2_SET_FLE_BPID(sge + 1, bpid);
763 DPAA2_SET_FLE_BPID(sge + 2, bpid);
764 DPAA2_SET_FLE_BPID(sge + 3, bpid);
766 DPAA2_SET_FD_IVP(fd);
767 DPAA2_SET_FLE_IVP(fle);
768 DPAA2_SET_FLE_IVP((fle + 1));
769 DPAA2_SET_FLE_IVP(sge);
770 DPAA2_SET_FLE_IVP((sge + 1));
771 DPAA2_SET_FLE_IVP((sge + 2));
772 DPAA2_SET_FLE_IVP((sge + 3));
775 /* Save the shared descriptor */
776 flc = &priv->flc_desc[0].flc;
777 /* Configure FD as a FRAME LIST */
778 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
779 DPAA2_SET_FD_COMPOUND_FMT(fd);
780 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
783 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
784 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
785 sym_op->auth.data.offset,
786 sym_op->auth.data.length,
788 sym_op->cipher.data.offset,
789 sym_op->cipher.data.length,
791 sym_op->m_src->data_off);
793 /* Configure Output FLE with Scatter/Gather Entry */
794 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
796 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
797 fle->length = (sess->dir == DIR_ENC) ?
798 (sym_op->cipher.data.length + icv_len) :
799 sym_op->cipher.data.length;
801 DPAA2_SET_FLE_SG_EXT(fle);
803 /* Configure Output SGE for Encap/Decap */
804 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
805 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
807 sge->length = sym_op->cipher.data.length;
809 if (sess->dir == DIR_ENC) {
811 DPAA2_SET_FLE_ADDR(sge,
812 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
813 sge->length = sess->digest_length;
814 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
817 DPAA2_SET_FLE_FIN(sge);
822 /* Configure Input FLE with Scatter/Gather Entry */
823 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
824 DPAA2_SET_FLE_SG_EXT(fle);
825 DPAA2_SET_FLE_FIN(fle);
826 fle->length = (sess->dir == DIR_ENC) ?
827 (sym_op->auth.data.length + sess->iv.length) :
828 (sym_op->auth.data.length + sess->iv.length +
829 sess->digest_length);
831 /* Configure Input SGE for Encap/Decap */
832 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
833 sge->length = sess->iv.length;
836 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
837 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
838 sym_op->m_src->data_off);
839 sge->length = sym_op->auth.data.length;
840 if (sess->dir == DIR_DEC) {
842 old_icv = (uint8_t *)(sge + 1);
843 memcpy(old_icv, sym_op->auth.digest.data,
844 sess->digest_length);
845 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
846 sge->length = sess->digest_length;
847 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
848 sess->digest_length +
851 DPAA2_SET_FLE_FIN(sge);
853 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
854 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
859 static inline int build_auth_sg_fd(
860 dpaa2_sec_session *sess,
861 struct rte_crypto_op *op,
863 __rte_unused uint16_t bpid)
865 struct rte_crypto_sym_op *sym_op = op->sym;
866 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
867 struct sec_flow_context *flc;
868 struct ctxt_priv *priv = sess->ctxt;
869 int data_len, data_offset;
871 struct rte_mbuf *mbuf;
873 data_len = sym_op->auth.data.length;
874 data_offset = sym_op->auth.data.offset;
876 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
877 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
878 if ((data_len & 7) || (data_offset & 7)) {
879 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
883 data_len = data_len >> 3;
884 data_offset = data_offset >> 3;
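/* For SNOW3G UIA2 and ZUC EIA3 the auth offset/length in the op are given
 * in bits; the checks and shifts above validate byte alignment and convert
 * them to whole bytes before the FLEs are built.
 */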
887 mbuf = sym_op->m_src;
888 fle = (struct qbman_fle *)rte_malloc(NULL,
889 FLE_SG_MEM_SIZE(mbuf->nb_segs),
890 RTE_CACHE_LINE_SIZE);
891 if (unlikely(!fle)) {
892 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
895 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
896 /* first FLE entry used to store mbuf and session ctxt */
897 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
898 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
903 flc = &priv->flc_desc[DESC_INITFINAL].flc;
905 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
906 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
907 DPAA2_SET_FD_COMPOUND_FMT(fd);
910 DPAA2_SET_FLE_ADDR(op_fle,
911 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
912 op_fle->length = sess->digest_length;
915 DPAA2_SET_FLE_SG_EXT(ip_fle);
916 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
917 ip_fle->length = data_len;
919 if (sess->iv.length) {
922 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
925 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
926 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
928 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
929 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
932 sge->length = sess->iv.length;
934 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
935 ip_fle->length += sge->length;
939 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
940 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
942 if (data_len <= (mbuf->data_len - data_offset)) {
943 sge->length = data_len;
946 sge->length = mbuf->data_len - data_offset;
948 /* remaining i/p segs */
949 while ((data_len = data_len - sge->length) &&
950 (mbuf = mbuf->next)) {
952 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
953 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
954 if (data_len > mbuf->data_len)
955 sge->length = mbuf->data_len;
957 sge->length = data_len;
961 if (sess->dir == DIR_DEC) {
962 /* Digest verification case */
964 old_digest = (uint8_t *)(sge + 1);
965 rte_memcpy(old_digest, sym_op->auth.digest.data,
966 sess->digest_length);
967 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
968 sge->length = sess->digest_length;
969 ip_fle->length += sess->digest_length;
971 DPAA2_SET_FLE_FIN(sge);
972 DPAA2_SET_FLE_FIN(ip_fle);
973 DPAA2_SET_FD_LEN(fd, ip_fle->length);
979 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
980 struct qbman_fd *fd, uint16_t bpid)
982 struct rte_crypto_sym_op *sym_op = op->sym;
983 struct qbman_fle *fle, *sge;
984 struct sec_flow_context *flc;
985 struct ctxt_priv *priv = sess->ctxt;
986 int data_len, data_offset;
990 data_len = sym_op->auth.data.length;
991 data_offset = sym_op->auth.data.offset;
993 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
994 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
995 if ((data_len & 7) || (data_offset & 7)) {
996 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1000 data_len = data_len >> 3;
1001 data_offset = data_offset >> 3;
1004 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1006 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1009 memset(fle, 0, FLE_POOL_BUF_SIZE);
1010 /* TODO we are using the first FLE entry to store Mbuf.
1011 * Currently we do not know which FLE has the mbuf stored.
1012 * So while retrieving we can go back 1 FLE from the FD ADDR
1013 * to get the MBUF addr from the previous FLE.
1014 * A better approach would be to use the inline Mbuf
1016 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1017 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1021 if (likely(bpid < MAX_BPID)) {
1022 DPAA2_SET_FD_BPID(fd, bpid);
1023 DPAA2_SET_FLE_BPID(fle, bpid);
1024 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1025 DPAA2_SET_FLE_BPID(sge, bpid);
1026 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1028 DPAA2_SET_FD_IVP(fd);
1029 DPAA2_SET_FLE_IVP(fle);
1030 DPAA2_SET_FLE_IVP((fle + 1));
1031 DPAA2_SET_FLE_IVP(sge);
1032 DPAA2_SET_FLE_IVP((sge + 1));
1035 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1036 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1037 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1038 DPAA2_SET_FD_COMPOUND_FMT(fd);
1040 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1041 fle->length = sess->digest_length;
1044 /* Setting input FLE */
1045 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1046 DPAA2_SET_FLE_SG_EXT(fle);
1047 fle->length = data_len;
1049 if (sess->iv.length) {
1052 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1055 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1056 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1058 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1059 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1062 sge->length = sess->iv.length;
1065 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1066 fle->length = fle->length + sge->length;
1070 /* Setting data to authenticate */
1071 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1072 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1073 sge->length = data_len;
1075 if (sess->dir == DIR_DEC) {
1077 old_digest = (uint8_t *)(sge + 1);
1078 rte_memcpy(old_digest, sym_op->auth.digest.data,
1079 sess->digest_length);
1080 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1081 sge->length = sess->digest_length;
1082 fle->length = fle->length + sess->digest_length;
1085 DPAA2_SET_FLE_FIN(sge);
1086 DPAA2_SET_FLE_FIN(fle);
1087 DPAA2_SET_FD_LEN(fd, fle->length);
1093 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1094 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1096 struct rte_crypto_sym_op *sym_op = op->sym;
1097 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1098 int data_len, data_offset;
1099 struct sec_flow_context *flc;
1100 struct ctxt_priv *priv = sess->ctxt;
1101 struct rte_mbuf *mbuf;
1102 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1105 data_len = sym_op->cipher.data.length;
1106 data_offset = sym_op->cipher.data.offset;
1108 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1109 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1110 if ((data_len & 7) || (data_offset & 7)) {
1111 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1115 data_len = data_len >> 3;
1116 data_offset = data_offset >> 3;
1120 mbuf = sym_op->m_dst;
1122 mbuf = sym_op->m_src;
1124 /* first FLE entry used to store mbuf and session ctxt */
1125 fle = (struct qbman_fle *)rte_malloc(NULL,
1126 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1127 RTE_CACHE_LINE_SIZE);
1129 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1132 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1133 /* first FLE entry used to store mbuf and session ctxt */
1134 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1135 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1141 flc = &priv->flc_desc[0].flc;
1144 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1145 " data_off: 0x%x\n",
1149 sym_op->m_src->data_off);
1152 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1153 op_fle->length = data_len;
1154 DPAA2_SET_FLE_SG_EXT(op_fle);
1157 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1158 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1159 sge->length = mbuf->data_len - data_offset;
1165 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1166 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1167 sge->length = mbuf->data_len;
1170 DPAA2_SET_FLE_FIN(sge);
1173 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1174 flc, fle, fle->addr_hi, fle->addr_lo,
1178 mbuf = sym_op->m_src;
1180 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1181 ip_fle->length = sess->iv.length + data_len;
1182 DPAA2_SET_FLE_SG_EXT(ip_fle);
1185 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1186 DPAA2_SET_FLE_OFFSET(sge, 0);
1187 sge->length = sess->iv.length;
1192 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1193 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1194 sge->length = mbuf->data_len - data_offset;
1200 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1201 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1202 sge->length = mbuf->data_len;
1205 DPAA2_SET_FLE_FIN(sge);
1206 DPAA2_SET_FLE_FIN(ip_fle);
1209 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1210 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1211 DPAA2_SET_FD_COMPOUND_FMT(fd);
1212 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1215 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1216 " off =%d, len =%d\n",
1217 DPAA2_GET_FD_ADDR(fd),
1218 DPAA2_GET_FD_BPID(fd),
1219 rte_dpaa2_bpid_info[bpid].meta_data_size,
1220 DPAA2_GET_FD_OFFSET(fd),
1221 DPAA2_GET_FD_LEN(fd));
1226 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1227 struct qbman_fd *fd, uint16_t bpid)
1229 struct rte_crypto_sym_op *sym_op = op->sym;
1230 struct qbman_fle *fle, *sge;
1231 int retval, data_len, data_offset;
1232 struct sec_flow_context *flc;
1233 struct ctxt_priv *priv = sess->ctxt;
1234 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1236 struct rte_mbuf *dst;
1238 data_len = sym_op->cipher.data.length;
1239 data_offset = sym_op->cipher.data.offset;
1241 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1242 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1243 if ((data_len & 7) || (data_offset & 7)) {
1244 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1248 data_len = data_len >> 3;
1249 data_offset = data_offset >> 3;
1253 dst = sym_op->m_dst;
1255 dst = sym_op->m_src;
1257 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1259 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1262 memset(fle, 0, FLE_POOL_BUF_SIZE);
1263 /* TODO we are using the first FLE entry to store Mbuf.
1264 * Currently we do not know which FLE has the mbuf stored.
1265 * So while retrieving we can go back 1 FLE from the FD ADDR
1266 * to get the MBUF addr from the previous FLE.
1267 * A better approach would be to use the inline Mbuf
1269 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1270 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1274 if (likely(bpid < MAX_BPID)) {
1275 DPAA2_SET_FD_BPID(fd, bpid);
1276 DPAA2_SET_FLE_BPID(fle, bpid);
1277 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1278 DPAA2_SET_FLE_BPID(sge, bpid);
1279 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1281 DPAA2_SET_FD_IVP(fd);
1282 DPAA2_SET_FLE_IVP(fle);
1283 DPAA2_SET_FLE_IVP((fle + 1));
1284 DPAA2_SET_FLE_IVP(sge);
1285 DPAA2_SET_FLE_IVP((sge + 1));
1288 flc = &priv->flc_desc[0].flc;
1289 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1290 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1291 DPAA2_SET_FD_COMPOUND_FMT(fd);
1292 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1295 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1296 " data_off: 0x%x\n",
1300 sym_op->m_src->data_off);
1302 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1303 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1305 fle->length = data_len + sess->iv.length;
1308 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1309 flc, fle, fle->addr_hi, fle->addr_lo,
1314 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1315 fle->length = data_len + sess->iv.length;
1317 DPAA2_SET_FLE_SG_EXT(fle);
1319 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1320 sge->length = sess->iv.length;
1323 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1324 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1326 sge->length = data_len;
1327 DPAA2_SET_FLE_FIN(sge);
1328 DPAA2_SET_FLE_FIN(fle);
1331 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1332 " off =%d, len =%d\n",
1333 DPAA2_GET_FD_ADDR(fd),
1334 DPAA2_GET_FD_BPID(fd),
1335 rte_dpaa2_bpid_info[bpid].meta_data_size,
1336 DPAA2_GET_FD_OFFSET(fd),
1337 DPAA2_GET_FD_LEN(fd));
1343 build_sec_fd(struct rte_crypto_op *op,
1344 struct qbman_fd *fd, uint16_t bpid)
1347 dpaa2_sec_session *sess;
1349 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1350 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1351 op->sym->session, cryptodev_driver_id);
1352 #ifdef RTE_LIB_SECURITY
1353 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1354 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1355 op->sym->sec_session);
1363 /* Any of the buffers is segmented */
1364 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1365 ((op->sym->m_dst != NULL) &&
1366 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
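/* Scatter-gather FD builders are used whenever the source or destination
 * mbuf is segmented; they allocate a per-op FLE table with rte_malloc()
 * instead of using the fixed-size entries from the FLE pool.
 */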
1367 switch (sess->ctxt_type) {
1368 case DPAA2_SEC_CIPHER:
1369 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1371 case DPAA2_SEC_AUTH:
1372 ret = build_auth_sg_fd(sess, op, fd, bpid);
1374 case DPAA2_SEC_AEAD:
1375 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1377 case DPAA2_SEC_CIPHER_HASH:
1378 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1380 #ifdef RTE_LIB_SECURITY
1381 case DPAA2_SEC_IPSEC:
1382 case DPAA2_SEC_PDCP:
1383 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1386 case DPAA2_SEC_HASH_CIPHER:
1388 DPAA2_SEC_ERR("error: Unsupported session");
1391 switch (sess->ctxt_type) {
1392 case DPAA2_SEC_CIPHER:
1393 ret = build_cipher_fd(sess, op, fd, bpid);
1395 case DPAA2_SEC_AUTH:
1396 ret = build_auth_fd(sess, op, fd, bpid);
1398 case DPAA2_SEC_AEAD:
1399 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1401 case DPAA2_SEC_CIPHER_HASH:
1402 ret = build_authenc_fd(sess, op, fd, bpid);
1404 #ifdef RTE_LIB_SECURITY
1405 case DPAA2_SEC_IPSEC:
1406 ret = build_proto_fd(sess, op, fd, bpid);
1408 case DPAA2_SEC_PDCP:
1409 ret = build_proto_compound_fd(sess, op, fd, bpid);
1412 case DPAA2_SEC_HASH_CIPHER:
1414 DPAA2_SEC_ERR("error: Unsupported session");
1422 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1425 /* Function to transmit the frames to a given device and VQ */
1428 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1429 uint32_t frames_to_send, retry_count;
1430 struct qbman_eq_desc eqdesc;
1431 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1432 struct qbman_swp *swp;
1433 uint16_t num_tx = 0;
1434 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1435 /*todo - need to support multiple buffer pools */
1437 struct rte_mempool *mb_pool;
1439 if (unlikely(nb_ops == 0))
1442 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1443 DPAA2_SEC_ERR("sessionless crypto op not supported");
1446 /*Prepare enqueue descriptor*/
1447 qbman_eq_desc_clear(&eqdesc);
1448 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1449 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1450 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1452 if (!DPAA2_PER_LCORE_DPIO) {
1453 ret = dpaa2_affine_qbman_swp();
1456 "Failed to allocate IO portal, tid: %d\n",
1461 swp = DPAA2_PER_LCORE_PORTAL;
1464 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1465 dpaa2_eqcr_size : nb_ops;
1467 for (loop = 0; loop < frames_to_send; loop++) {
1468 if (*dpaa2_seqn((*ops)->sym->m_src)) {
1469 uint8_t dqrr_index =
1470 *dpaa2_seqn((*ops)->sym->m_src) - 1;
1472 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1473 DPAA2_PER_LCORE_DQRR_SIZE--;
1474 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1475 *dpaa2_seqn((*ops)->sym->m_src) =
1476 DPAA2_INVALID_MBUF_SEQN;
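/* A non-zero sequence number means the mbuf was delivered through an
 * atomic event queue; enqueuing with the DCA flag acknowledges the held
 * DQRR entry so the hardware can release that order context.
 */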
1479 /*Clear the unused FD fields before sending*/
1480 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1481 mb_pool = (*ops)->sym->m_src->pool;
1482 bpid = mempool_to_bpid(mb_pool);
1483 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1485 DPAA2_SEC_ERR("error: Improper packet contents"
1486 " for crypto operation");
1494 while (loop < frames_to_send) {
1495 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1498 frames_to_send - loop);
1499 if (unlikely(ret < 0)) {
1501 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1516 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1517 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1521 #ifdef RTE_LIB_SECURITY
1522 static inline struct rte_crypto_op *
1523 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1525 struct rte_crypto_op *op;
1526 uint16_t len = DPAA2_GET_FD_LEN(fd);
1528 dpaa2_sec_session *sess_priv __rte_unused;
1530 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1531 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1532 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1534 diff = len - mbuf->pkt_len;
1535 mbuf->pkt_len += diff;
1536 mbuf->data_len += diff;
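/* The FD length reflects the protocol-processed output (e.g. after IPsec
 * encap/decap), so pkt_len/data_len are adjusted by the difference.
 */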
1537 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1538 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1539 op->sym->aead.digest.phys_addr = 0L;
1541 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1542 op->sym->sec_session);
1543 if (sess_priv->dir == DIR_ENC)
1544 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1546 mbuf->data_off += SEC_FLC_DHR_INBOUND;
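/* Protocol offload adds or strips headers, so the data offset is moved by
 * the data head-room configured in the flow context for that direction.
 */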
1552 static inline struct rte_crypto_op *
1553 sec_fd_to_mbuf(const struct qbman_fd *fd)
1555 struct qbman_fle *fle;
1556 struct rte_crypto_op *op;
1557 struct ctxt_priv *priv;
1558 struct rte_mbuf *dst, *src;
1560 #ifdef RTE_LIB_SECURITY
1561 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1562 return sec_simple_fd_to_mbuf(fd);
1564 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1566 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1567 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1569 /* we are using the first FLE entry to store Mbuf.
1570 * Currently we do not know which FLE has the mbuf stored.
1571 * So while retrieving we can go back 1 FLE from the FD ADDR
1572 * to get the MBUF addr from the previous FLE.
1573 * A better approach would be to use the inline Mbuf
1576 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1577 /* TODO complete it. */
1578 DPAA2_SEC_ERR("error: non inline buffer");
1581 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1584 src = op->sym->m_src;
1587 if (op->sym->m_dst) {
1588 dst = op->sym->m_dst;
1593 #ifdef RTE_LIB_SECURITY
1594 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1595 uint16_t len = DPAA2_GET_FD_LEN(fd);
1597 while (dst->next != NULL) {
1598 len -= dst->data_len;
1601 dst->data_len = len;
1604 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1605 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1608 DPAA2_GET_FD_ADDR(fd),
1609 DPAA2_GET_FD_BPID(fd),
1610 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1611 DPAA2_GET_FD_OFFSET(fd),
1612 DPAA2_GET_FD_LEN(fd));
1614 /* free the fle memory */
1615 if (likely(rte_pktmbuf_is_contiguous(src))) {
1616 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1617 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1619 rte_free((void *)(fle-1));
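/* FLEs for contiguous mbufs come from the FLE mempool (see the
 * rte_mempool_get() in the non-SG builders), while the scatter-gather
 * builders rte_malloc() a per-op FLE table, hence the two release paths.
 */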
1625 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1628 /* Function responsible for receiving frames for a given device and VQ */
1629 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1630 struct qbman_result *dq_storage;
1631 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1632 int ret, num_rx = 0;
1633 uint8_t is_last = 0, status;
1634 struct qbman_swp *swp;
1635 const struct qbman_fd *fd;
1636 struct qbman_pull_desc pulldesc;
1638 if (!DPAA2_PER_LCORE_DPIO) {
1639 ret = dpaa2_affine_qbman_swp();
1642 "Failed to allocate IO portal, tid: %d\n",
1647 swp = DPAA2_PER_LCORE_PORTAL;
1648 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1650 qbman_pull_desc_clear(&pulldesc);
1651 qbman_pull_desc_set_numframes(&pulldesc,
1652 (nb_ops > dpaa2_dqrr_size) ?
1653 dpaa2_dqrr_size : nb_ops);
1654 qbman_pull_desc_set_fq(&pulldesc, fqid);
1655 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1656 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1659 /*Issue a volatile dequeue command. */
1661 if (qbman_swp_pull(swp, &pulldesc)) {
1663 "SEC VDQ command is not issued : QBMAN busy");
1664 /* Portal was busy, try again */
1670 /* Receive the packets till the Last Dequeue entry is found with
1671 * respect to the above issued PULL command.
1674 /* Check if the previously issued command is completed.
1675 * Also, it seems the SWP is shared between the Ethernet driver
1676 * and the SEC driver.
1678 while (!qbman_check_command_complete(dq_storage))
1681 /* Loop until the dq_storage is updated with
1682 * new token by QBMAN
1684 while (!qbman_check_new_result(dq_storage))
1686 /* Check whether the last pull command has expired and
1687 * set the condition for loop termination
1689 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1691 /* Check for valid frame. */
1692 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1694 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1695 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1700 fd = qbman_result_DQ_fd(dq_storage);
1701 ops[num_rx] = sec_fd_to_mbuf(fd);
1703 if (unlikely(fd->simple.frc)) {
1704 /* TODO Parse SEC errors */
1705 DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
1707 dpaa2_qp->rx_vq.err_pkts += 1;
1708 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1710 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1715 } /* End of Packet Rx loop */
1717 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1719 DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
1720 dpaa2_qp->rx_vq.err_pkts);
1721 /*Return the total number of packets received to DPAA2 app*/
1725 /** Release queue pair */
1727 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1729 struct dpaa2_sec_qp *qp =
1730 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1732 PMD_INIT_FUNC_TRACE();
1734 if (qp->rx_vq.q_storage) {
1735 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1736 rte_free(qp->rx_vq.q_storage);
1740 dev->data->queue_pairs[queue_pair_id] = NULL;
1745 /** Setup a queue pair */
1747 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1748 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1749 __rte_unused int socket_id)
1751 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1752 struct dpaa2_sec_qp *qp;
1753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1754 struct dpseci_rx_queue_cfg cfg;
1757 PMD_INIT_FUNC_TRACE();
1759 /* If the qp is already set up, nothing more to be done. */
1760 if (dev->data->queue_pairs[qp_id] != NULL) {
1761 DPAA2_SEC_INFO("QP already setup");
1765 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1766 dev, qp_id, qp_conf);
1768 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1770 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1771 RTE_CACHE_LINE_SIZE);
1773 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1777 qp->rx_vq.crypto_data = dev->data;
1778 qp->tx_vq.crypto_data = dev->data;
1779 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1780 sizeof(struct queue_storage_info_t),
1781 RTE_CACHE_LINE_SIZE);
1782 if (!qp->rx_vq.q_storage) {
1783 DPAA2_SEC_ERR("malloc failed for q_storage");
1786 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1788 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1789 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1793 dev->data->queue_pairs[qp_id] = qp;
1795 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1796 cfg.user_ctx = (size_t)(&qp->rx_vq);
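/* Storing the rx_vq pointer as the queue user context lets dequeue
 * responses (e.g. in event-driven mode) be mapped back to this queue pair.
 */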
1797 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1802 /** Returns the size of the dpaa2_sec session structure */
1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1806 PMD_INIT_FUNC_TRACE();
1808 return sizeof(dpaa2_sec_session);
1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1813 struct rte_crypto_sym_xform *xform,
1814 dpaa2_sec_session *session)
1816 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1817 struct alginfo cipherdata;
1818 int bufsize, ret = 0;
1819 struct ctxt_priv *priv;
1820 struct sec_flow_context *flc;
1822 PMD_INIT_FUNC_TRACE();
1824 /* For SEC CIPHER only one descriptor is required. */
1825 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1826 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1827 RTE_CACHE_LINE_SIZE);
1829 DPAA2_SEC_ERR("No Memory for priv CTXT");
1833 priv->fle_pool = dev_priv->fle_pool;
1835 flc = &priv->flc_desc[0].flc;
1837 session->ctxt_type = DPAA2_SEC_CIPHER;
1838 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1839 RTE_CACHE_LINE_SIZE);
1840 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1841 DPAA2_SEC_ERR("No Memory for cipher key");
1845 session->cipher_key.length = xform->cipher.key.length;
1847 memcpy(session->cipher_key.data, xform->cipher.key.data,
1848 xform->cipher.key.length);
1849 cipherdata.key = (size_t)session->cipher_key.data;
1850 cipherdata.keylen = session->cipher_key.length;
1851 cipherdata.key_enc_flags = 0;
1852 cipherdata.key_type = RTA_DATA_IMM;
1854 /* Set IV parameters */
1855 session->iv.offset = xform->cipher.iv.offset;
1856 session->iv.length = xform->cipher.iv.length;
1857 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1860 switch (xform->cipher.algo) {
1861 case RTE_CRYPTO_CIPHER_AES_CBC:
1862 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1863 cipherdata.algmode = OP_ALG_AAI_CBC;
1864 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1865 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1866 SHR_NEVER, &cipherdata,
1870 case RTE_CRYPTO_CIPHER_3DES_CBC:
1871 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1872 cipherdata.algmode = OP_ALG_AAI_CBC;
1873 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1874 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1875 SHR_NEVER, &cipherdata,
1879 case RTE_CRYPTO_CIPHER_DES_CBC:
1880 cipherdata.algtype = OP_ALG_ALGSEL_DES;
1881 cipherdata.algmode = OP_ALG_AAI_CBC;
1882 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1883 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1884 SHR_NEVER, &cipherdata,
1888 case RTE_CRYPTO_CIPHER_AES_CTR:
1889 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1890 cipherdata.algmode = OP_ALG_AAI_CTR;
1891 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1892 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1893 SHR_NEVER, &cipherdata,
1897 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1898 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1899 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1900 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1904 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1905 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1906 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1907 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1911 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1912 case RTE_CRYPTO_CIPHER_AES_F8:
1913 case RTE_CRYPTO_CIPHER_AES_ECB:
1914 case RTE_CRYPTO_CIPHER_3DES_ECB:
1915 case RTE_CRYPTO_CIPHER_3DES_CTR:
1916 case RTE_CRYPTO_CIPHER_AES_XTS:
1917 case RTE_CRYPTO_CIPHER_ARC4:
1918 case RTE_CRYPTO_CIPHER_NULL:
1919 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1920 xform->cipher.algo);
1924 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1925 xform->cipher.algo);
1931 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1936 flc->word1_sdl = (uint8_t)bufsize;
1937 session->ctxt = priv;
1939 #ifdef CAAM_DESC_DEBUG
1941 for (i = 0; i < bufsize; i++)
1942 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1947 rte_free(session->cipher_key.data);
1953 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1954 struct rte_crypto_sym_xform *xform,
1955 dpaa2_sec_session *session)
1957 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1958 struct alginfo authdata;
1959 int bufsize, ret = 0;
1960 struct ctxt_priv *priv;
1961 struct sec_flow_context *flc;
1963 PMD_INIT_FUNC_TRACE();
1965 /* For SEC AUTH three descriptors are required for various stages */
1966 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1967 sizeof(struct ctxt_priv) + 3 *
1968 sizeof(struct sec_flc_desc),
1969 RTE_CACHE_LINE_SIZE);
1971 DPAA2_SEC_ERR("No Memory for priv CTXT");
1975 priv->fle_pool = dev_priv->fle_pool;
1976 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1978 session->ctxt_type = DPAA2_SEC_AUTH;
1979 session->auth_key.length = xform->auth.key.length;
1980 if (xform->auth.key.length) {
1981 session->auth_key.data = rte_zmalloc(NULL,
1982 xform->auth.key.length,
1983 RTE_CACHE_LINE_SIZE);
1984 if (session->auth_key.data == NULL) {
1985 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1989 memcpy(session->auth_key.data, xform->auth.key.data,
1990 xform->auth.key.length);
1991 authdata.key = (size_t)session->auth_key.data;
1992 authdata.key_enc_flags = 0;
1993 authdata.key_type = RTA_DATA_IMM;
1995 authdata.keylen = session->auth_key.length;
1997 session->digest_length = xform->auth.digest_length;
1998 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2001 switch (xform->auth.algo) {
2002 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2003 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2004 authdata.algmode = OP_ALG_AAI_HMAC;
2005 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2006 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2007 1, 0, SHR_NEVER, &authdata,
2009 session->digest_length);
2011 case RTE_CRYPTO_AUTH_MD5_HMAC:
2012 authdata.algtype = OP_ALG_ALGSEL_MD5;
2013 authdata.algmode = OP_ALG_AAI_HMAC;
2014 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2015 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2016 1, 0, SHR_NEVER, &authdata,
2018 session->digest_length);
2020 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2021 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2022 authdata.algmode = OP_ALG_AAI_HMAC;
2023 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2024 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2025 1, 0, SHR_NEVER, &authdata,
2027 session->digest_length);
2029 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2030 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2031 authdata.algmode = OP_ALG_AAI_HMAC;
2032 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2033 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2034 1, 0, SHR_NEVER, &authdata,
2036 session->digest_length);
2038 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2039 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2040 authdata.algmode = OP_ALG_AAI_HMAC;
2041 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2042 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2043 1, 0, SHR_NEVER, &authdata,
2045 session->digest_length);
2047 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2048 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2049 authdata.algmode = OP_ALG_AAI_HMAC;
2050 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2051 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2052 1, 0, SHR_NEVER, &authdata,
2054 session->digest_length);
2056 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2057 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2058 authdata.algmode = OP_ALG_AAI_F9;
2059 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2060 session->iv.offset = xform->auth.iv.offset;
2061 session->iv.length = xform->auth.iv.length;
2062 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2065 session->digest_length);
2067 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2068 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2069 authdata.algmode = OP_ALG_AAI_F9;
2070 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2071 session->iv.offset = xform->auth.iv.offset;
2072 session->iv.length = xform->auth.iv.length;
2073 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2076 session->digest_length);
2078 case RTE_CRYPTO_AUTH_SHA1:
2079 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2080 authdata.algmode = OP_ALG_AAI_HASH;
2081 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2082 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2083 1, 0, SHR_NEVER, &authdata,
2085 session->digest_length);
2087 case RTE_CRYPTO_AUTH_MD5:
2088 authdata.algtype = OP_ALG_ALGSEL_MD5;
2089 authdata.algmode = OP_ALG_AAI_HASH;
2090 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2091 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2092 1, 0, SHR_NEVER, &authdata,
2094 session->digest_length);
2096 case RTE_CRYPTO_AUTH_SHA256:
2097 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2098 authdata.algmode = OP_ALG_AAI_HASH;
2099 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2100 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2101 1, 0, SHR_NEVER, &authdata,
2103 session->digest_length);
2105 case RTE_CRYPTO_AUTH_SHA384:
2106 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2107 authdata.algmode = OP_ALG_AAI_HASH;
2108 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2109 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2110 1, 0, SHR_NEVER, &authdata,
2112 session->digest_length);
2114 case RTE_CRYPTO_AUTH_SHA512:
2115 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2116 authdata.algmode = OP_ALG_AAI_HASH;
2117 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2118 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2119 1, 0, SHR_NEVER, &authdata,
2121 session->digest_length);
2123 case RTE_CRYPTO_AUTH_SHA224:
2124 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2125 authdata.algmode = OP_ALG_AAI_HASH;
2126 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2127 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2128 1, 0, SHR_NEVER, &authdata,
2130 session->digest_length);
2132 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2133 authdata.algtype = OP_ALG_ALGSEL_AES;
2134 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2135 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2136 bufsize = cnstr_shdsc_aes_mac(
2137 priv->flc_desc[DESC_INITFINAL].desc,
2138 1, 0, SHR_NEVER, &authdata,
2140 session->digest_length);
2142 case RTE_CRYPTO_AUTH_AES_CMAC:
2143 authdata.algtype = OP_ALG_ALGSEL_AES;
2144 authdata.algmode = OP_ALG_AAI_CMAC;
2145 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2146 bufsize = cnstr_shdsc_aes_mac(
2147 priv->flc_desc[DESC_INITFINAL].desc,
2148 1, 0, SHR_NEVER, &authdata,
2150 session->digest_length);
2152 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2153 case RTE_CRYPTO_AUTH_AES_GMAC:
2154 case RTE_CRYPTO_AUTH_KASUMI_F9:
2155 case RTE_CRYPTO_AUTH_NULL:
2156 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2161 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2168 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
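/* Note (sketch of intent): the cnstr_shdsc_* helpers return the shared
 * descriptor length as a count of 32-bit CAAM words; that length is
 * recorded in the flow context SDL field below so the SEC engine knows
 * how much descriptor to fetch.
 */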
2173 flc->word1_sdl = (uint8_t)bufsize;
2174 session->ctxt = priv;
2175 #ifdef CAAM_DESC_DEBUG
2177 for (i = 0; i < bufsize; i++)
2178 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2179 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2185 rte_free(session->auth_key.data);
2191 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2192 struct rte_crypto_sym_xform *xform,
2193 dpaa2_sec_session *session)
2195 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2196 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2197 struct alginfo aeaddata;
2199 struct ctxt_priv *priv;
2200 struct sec_flow_context *flc;
2201 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2204 PMD_INIT_FUNC_TRACE();
2206 /* Set IV parameters */
2207 session->iv.offset = aead_xform->iv.offset;
2208 session->iv.length = aead_xform->iv.length;
2209 session->ctxt_type = DPAA2_SEC_AEAD;
2211 /* For SEC AEAD only one descriptor is required */
2212 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2213 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2214 RTE_CACHE_LINE_SIZE);
2216 DPAA2_SEC_ERR("No Memory for priv CTXT");
2220 priv->fle_pool = dev_priv->fle_pool;
2221 flc = &priv->flc_desc[0].flc;
2223 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2224 RTE_CACHE_LINE_SIZE);
2225 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2226 DPAA2_SEC_ERR("No Memory for aead key");
2230 memcpy(session->aead_key.data, aead_xform->key.data,
2231 aead_xform->key.length);
2233 session->digest_length = aead_xform->digest_length;
2234 session->aead_key.length = aead_xform->key.length;
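/* For AES-GCM the AAD is authenticated but not encrypted; its length is
 * kept separately as auth_only_len so the data path can split the input
 * into the AAD and payload regions.
 */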
2235 ctxt->auth_only_len = aead_xform->aad_length;
2237 aeaddata.key = (size_t)session->aead_key.data;
2238 aeaddata.keylen = session->aead_key.length;
2239 aeaddata.key_enc_flags = 0;
2240 aeaddata.key_type = RTA_DATA_IMM;
2242 switch (aead_xform->algo) {
2243 case RTE_CRYPTO_AEAD_AES_GCM:
2244 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2245 aeaddata.algmode = OP_ALG_AAI_GCM;
2246 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2248 case RTE_CRYPTO_AEAD_AES_CCM:
2249 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2254 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2259 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2262 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2263 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2265 (unsigned int *)priv->flc_desc[0].desc,
2266 &priv->flc_desc[0].desc[1], 1);
2269 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
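/* rta_inline_query() reports, via the bitmap written to desc[1], whether
 * the key still fits inline in the shared descriptor: bit 0 set means the
 * AEAD key can be embedded as immediate data, otherwise it is referenced
 * through its IOVA as RTA_DATA_PTR.
 */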
2273 if (priv->flc_desc[0].desc[1] & 1) {
2274 aeaddata.key_type = RTA_DATA_IMM;
2276 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2277 aeaddata.key_type = RTA_DATA_PTR;
2279 priv->flc_desc[0].desc[0] = 0;
2280 priv->flc_desc[0].desc[1] = 0;
2282 if (session->dir == DIR_ENC)
2283 bufsize = cnstr_shdsc_gcm_encap(
2284 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2285 &aeaddata, session->iv.length,
2286 session->digest_length);
2288 bufsize = cnstr_shdsc_gcm_decap(
2289 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2290 &aeaddata, session->iv.length,
2291 session->digest_length);
2293 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2298 flc->word1_sdl = (uint8_t)bufsize;
2299 session->ctxt = priv;
2300 #ifdef CAAM_DESC_DEBUG
2302 for (i = 0; i < bufsize; i++)
2303 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2304 i, priv->flc_desc[0].desc[i]);
2309 rte_free(session->aead_key.data);
2316 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2317 struct rte_crypto_sym_xform *xform,
2318 dpaa2_sec_session *session)
2320 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2321 struct alginfo authdata, cipherdata;
2323 struct ctxt_priv *priv;
2324 struct sec_flow_context *flc;
2325 struct rte_crypto_cipher_xform *cipher_xform;
2326 struct rte_crypto_auth_xform *auth_xform;
2329 PMD_INIT_FUNC_TRACE();
2331 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2332 cipher_xform = &xform->cipher;
2333 auth_xform = &xform->next->auth;
2334 session->ctxt_type =
2335 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2336 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2338 cipher_xform = &xform->next->cipher;
2339 auth_xform = &xform->auth;
2340 session->ctxt_type =
2341 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2342 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2345 /* Set IV parameters */
2346 session->iv.offset = cipher_xform->iv.offset;
2347 session->iv.length = cipher_xform->iv.length;
2349 /* For SEC AEAD only one descriptor is required */
2350 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2351 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2352 RTE_CACHE_LINE_SIZE);
2354 DPAA2_SEC_ERR("No Memory for priv CTXT");
2358 priv->fle_pool = dev_priv->fle_pool;
2359 flc = &priv->flc_desc[0].flc;
2361 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2362 RTE_CACHE_LINE_SIZE);
2363 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2364 DPAA2_SEC_ERR("No Memory for cipher key");
2368 session->cipher_key.length = cipher_xform->key.length;
2369 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2370 RTE_CACHE_LINE_SIZE);
2371 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2372 DPAA2_SEC_ERR("No Memory for auth key");
2373 rte_free(session->cipher_key.data);
2377 session->auth_key.length = auth_xform->key.length;
2378 memcpy(session->cipher_key.data, cipher_xform->key.data,
2379 cipher_xform->key.length);
2380 memcpy(session->auth_key.data, auth_xform->key.data,
2381 auth_xform->key.length);
2383 authdata.key = (size_t)session->auth_key.data;
2384 authdata.keylen = session->auth_key.length;
2385 authdata.key_enc_flags = 0;
2386 authdata.key_type = RTA_DATA_IMM;
2388 session->digest_length = auth_xform->digest_length;
2390 switch (auth_xform->algo) {
2391 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2392 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2393 authdata.algmode = OP_ALG_AAI_HMAC;
2394 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2396 case RTE_CRYPTO_AUTH_MD5_HMAC:
2397 authdata.algtype = OP_ALG_ALGSEL_MD5;
2398 authdata.algmode = OP_ALG_AAI_HMAC;
2399 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2401 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2402 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2403 authdata.algmode = OP_ALG_AAI_HMAC;
2404 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2406 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2407 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2408 authdata.algmode = OP_ALG_AAI_HMAC;
2409 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2411 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2412 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2413 authdata.algmode = OP_ALG_AAI_HMAC;
2414 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2416 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2417 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2418 authdata.algmode = OP_ALG_AAI_HMAC;
2419 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2421 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2422 authdata.algtype = OP_ALG_ALGSEL_AES;
2423 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2424 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2426 case RTE_CRYPTO_AUTH_AES_CMAC:
2427 authdata.algtype = OP_ALG_ALGSEL_AES;
2428 authdata.algmode = OP_ALG_AAI_CMAC;
2429 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2431 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2432 case RTE_CRYPTO_AUTH_AES_GMAC:
2433 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2434 case RTE_CRYPTO_AUTH_NULL:
2435 case RTE_CRYPTO_AUTH_SHA1:
2436 case RTE_CRYPTO_AUTH_SHA256:
2437 case RTE_CRYPTO_AUTH_SHA512:
2438 case RTE_CRYPTO_AUTH_SHA224:
2439 case RTE_CRYPTO_AUTH_SHA384:
2440 case RTE_CRYPTO_AUTH_MD5:
2441 case RTE_CRYPTO_AUTH_KASUMI_F9:
2442 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2443 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2448 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2453 cipherdata.key = (size_t)session->cipher_key.data;
2454 cipherdata.keylen = session->cipher_key.length;
2455 cipherdata.key_enc_flags = 0;
2456 cipherdata.key_type = RTA_DATA_IMM;
2458 switch (cipher_xform->algo) {
2459 case RTE_CRYPTO_CIPHER_AES_CBC:
2460 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2461 cipherdata.algmode = OP_ALG_AAI_CBC;
2462 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2464 case RTE_CRYPTO_CIPHER_3DES_CBC:
2465 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2466 cipherdata.algmode = OP_ALG_AAI_CBC;
2467 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2469 case RTE_CRYPTO_CIPHER_DES_CBC:
2470 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2471 cipherdata.algmode = OP_ALG_AAI_CBC;
2472 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2474 case RTE_CRYPTO_CIPHER_AES_CTR:
2475 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2476 cipherdata.algmode = OP_ALG_AAI_CTR;
2477 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2479 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2480 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2481 case RTE_CRYPTO_CIPHER_NULL:
2482 case RTE_CRYPTO_CIPHER_3DES_ECB:
2483 case RTE_CRYPTO_CIPHER_3DES_CTR:
2484 case RTE_CRYPTO_CIPHER_AES_ECB:
2485 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2486 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2487 cipher_xform->algo);
2491 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2492 cipher_xform->algo);
2496 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2499 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2500 priv->flc_desc[0].desc[1] = authdata.keylen;
2501 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2503 (unsigned int *)priv->flc_desc[0].desc,
2504 &priv->flc_desc[0].desc[2], 2);
2507 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
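/* Here two keys compete for descriptor space: desc[2] receives the inline
 * mask from rta_inline_query(), with bit 0 covering the cipher key and
 * bit 1 the auth key. A key that does not fit is passed by IOVA pointer
 * instead of as immediate data.
 */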
2511 if (priv->flc_desc[0].desc[2] & 1) {
2512 cipherdata.key_type = RTA_DATA_IMM;
2514 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2515 cipherdata.key_type = RTA_DATA_PTR;
2517 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2518 authdata.key_type = RTA_DATA_IMM;
2520 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2521 authdata.key_type = RTA_DATA_PTR;
2523 priv->flc_desc[0].desc[0] = 0;
2524 priv->flc_desc[0].desc[1] = 0;
2525 priv->flc_desc[0].desc[2] = 0;
2527 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2528 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2530 &cipherdata, &authdata,
2532 session->digest_length,
2535 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2540 DPAA2_SEC_ERR("Hash before cipher not supported");
2545 flc->word1_sdl = (uint8_t)bufsize;
2546 session->ctxt = priv;
2547 #ifdef CAAM_DESC_DEBUG
2549 for (i = 0; i < bufsize; i++)
2550 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2551 i, priv->flc_desc[0].desc[i]);
2557 rte_free(session->cipher_key.data);
2558 rte_free(session->auth_key.data);
2564 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2565 struct rte_crypto_sym_xform *xform, void *sess)
2567 dpaa2_sec_session *session = sess;
2570 PMD_INIT_FUNC_TRACE();
2572 if (unlikely(sess == NULL)) {
2573 DPAA2_SEC_ERR("Invalid session struct");
2577 memset(session, 0, sizeof(dpaa2_sec_session));
2578 /* Default IV length = 0 */
2579 session->iv.length = 0;
2582 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2583 ret = dpaa2_sec_cipher_init(dev, xform, session);
2585 /* Authentication Only */
2586 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2587 xform->next == NULL) {
2588 ret = dpaa2_sec_auth_init(dev, xform, session);
2590 /* Cipher then Authenticate */
2591 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2592 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2593 session->ext_params.aead_ctxt.auth_cipher_text = true;
2594 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2595 ret = dpaa2_sec_auth_init(dev, xform, session);
2596 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2597 ret = dpaa2_sec_cipher_init(dev, xform, session);
2599 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2600 /* Authenticate then Cipher */
2601 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2602 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2603 session->ext_params.aead_ctxt.auth_cipher_text = false;
2604 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2605 ret = dpaa2_sec_cipher_init(dev, xform, session);
2606 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2607 ret = dpaa2_sec_auth_init(dev, xform, session);
2609 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2610 /* AEAD operation for AES-GCM kind of Algorithms */
2611 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2612 xform->next == NULL) {
2613 ret = dpaa2_sec_aead_init(dev, xform, session);
2616 DPAA2_SEC_ERR("Invalid crypto type");
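/* Illustrative only: an application requesting AES-CBC encryption followed
 * by SHA1-HMAC generation chains two xforms with the cipher first, which
 * the dispatch above routes to dpaa2_sec_aead_chain_init() with
 * auth_cipher_text = true:
 *
 *   struct rte_crypto_sym_xform auth_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *       .auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *   };
 *   struct rte_crypto_sym_xform cipher_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *       .next = &auth_xf,
 *       .cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *   };
 */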
2623 #ifdef RTE_LIB_SECURITY
2625 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2626 dpaa2_sec_session *session,
2627 struct alginfo *aeaddata)
2629 PMD_INIT_FUNC_TRACE();
2631 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2632 RTE_CACHE_LINE_SIZE);
2633 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2634 DPAA2_SEC_ERR("No Memory for aead key");
2637 memcpy(session->aead_key.data, aead_xform->key.data,
2638 aead_xform->key.length);
2640 session->digest_length = aead_xform->digest_length;
2641 session->aead_key.length = aead_xform->key.length;
2643 aeaddata->key = (size_t)session->aead_key.data;
2644 aeaddata->keylen = session->aead_key.length;
2645 aeaddata->key_enc_flags = 0;
2646 aeaddata->key_type = RTA_DATA_IMM;
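/* Lookaside IPsec descriptors encode the ICV length in the algorithm
 * selector itself, so the digest_length chosen by the application picks
 * between the GCM8/GCM12/GCM16 (and CCM8/CCM12/CCM16) protocol types in
 * the switch below.
 */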
2648 switch (aead_xform->algo) {
2649 case RTE_CRYPTO_AEAD_AES_GCM:
2650 switch (session->digest_length) {
2652 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2655 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2658 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2661 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2662 session->digest_length);
2665 aeaddata->algmode = OP_ALG_AAI_GCM;
2666 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2668 case RTE_CRYPTO_AEAD_AES_CCM:
2669 switch (session->digest_length) {
2671 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2674 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2677 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2680 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2681 session->digest_length);
2684 aeaddata->algmode = OP_ALG_AAI_CCM;
2685 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2688 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2692 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2699 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2700 struct rte_crypto_auth_xform *auth_xform,
2701 dpaa2_sec_session *session,
2702 struct alginfo *cipherdata,
2703 struct alginfo *authdata)
2706 session->cipher_key.data = rte_zmalloc(NULL,
2707 cipher_xform->key.length,
2708 RTE_CACHE_LINE_SIZE);
2709 if (session->cipher_key.data == NULL &&
2710 cipher_xform->key.length > 0) {
2711 DPAA2_SEC_ERR("No Memory for cipher key");
2715 session->cipher_key.length = cipher_xform->key.length;
2716 memcpy(session->cipher_key.data, cipher_xform->key.data,
2717 cipher_xform->key.length);
2718 session->cipher_alg = cipher_xform->algo;
2720 session->cipher_key.data = NULL;
2721 session->cipher_key.length = 0;
2722 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2726 session->auth_key.data = rte_zmalloc(NULL,
2727 auth_xform->key.length,
2728 RTE_CACHE_LINE_SIZE);
2729 if (session->auth_key.data == NULL &&
2730 auth_xform->key.length > 0) {
2731 DPAA2_SEC_ERR("No Memory for auth key");
2734 session->auth_key.length = auth_xform->key.length;
2735 memcpy(session->auth_key.data, auth_xform->key.data,
2736 auth_xform->key.length);
2737 session->auth_alg = auth_xform->algo;
2738 session->digest_length = auth_xform->digest_length;
2740 session->auth_key.data = NULL;
2741 session->auth_key.length = 0;
2742 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2745 authdata->key = (size_t)session->auth_key.data;
2746 authdata->keylen = session->auth_key.length;
2747 authdata->key_enc_flags = 0;
2748 authdata->key_type = RTA_DATA_IMM;
2749 switch (session->auth_alg) {
2750 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2751 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2752 authdata->algmode = OP_ALG_AAI_HMAC;
2754 case RTE_CRYPTO_AUTH_MD5_HMAC:
2755 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2756 authdata->algmode = OP_ALG_AAI_HMAC;
2758 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2759 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2760 authdata->algmode = OP_ALG_AAI_HMAC;
2761 if (session->digest_length != 16)
2763 "+++Using sha256-hmac truncated len is non-standard,"
2764 "it will not work with lookaside proto");
2766 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2767 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2768 authdata->algmode = OP_ALG_AAI_HMAC;
2770 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2771 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2772 authdata->algmode = OP_ALG_AAI_HMAC;
2774 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2775 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
2776 authdata->algmode = OP_ALG_AAI_XCBC_MAC;
2778 case RTE_CRYPTO_AUTH_AES_CMAC:
2779 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2780 authdata->algmode = OP_ALG_AAI_CMAC;
2782 case RTE_CRYPTO_AUTH_NULL:
2783 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2785 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2786 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2787 case RTE_CRYPTO_AUTH_SHA1:
2788 case RTE_CRYPTO_AUTH_SHA256:
2789 case RTE_CRYPTO_AUTH_SHA512:
2790 case RTE_CRYPTO_AUTH_SHA224:
2791 case RTE_CRYPTO_AUTH_SHA384:
2792 case RTE_CRYPTO_AUTH_MD5:
2793 case RTE_CRYPTO_AUTH_AES_GMAC:
2794 case RTE_CRYPTO_AUTH_KASUMI_F9:
2795 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2796 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2797 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2801 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2805 cipherdata->key = (size_t)session->cipher_key.data;
2806 cipherdata->keylen = session->cipher_key.length;
2807 cipherdata->key_enc_flags = 0;
2808 cipherdata->key_type = RTA_DATA_IMM;
2810 switch (session->cipher_alg) {
2811 case RTE_CRYPTO_CIPHER_AES_CBC:
2812 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2813 cipherdata->algmode = OP_ALG_AAI_CBC;
2815 case RTE_CRYPTO_CIPHER_3DES_CBC:
2816 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2817 cipherdata->algmode = OP_ALG_AAI_CBC;
2819 case RTE_CRYPTO_CIPHER_DES_CBC:
2820 cipherdata->algtype = OP_PCL_IPSEC_DES;
2821 cipherdata->algmode = OP_ALG_AAI_CBC;
2823 case RTE_CRYPTO_CIPHER_AES_CTR:
2824 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2825 cipherdata->algmode = OP_ALG_AAI_CTR;
2827 case RTE_CRYPTO_CIPHER_NULL:
2828 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2830 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2831 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2832 case RTE_CRYPTO_CIPHER_3DES_ECB:
2833 case RTE_CRYPTO_CIPHER_3DES_CTR:
2834 case RTE_CRYPTO_CIPHER_AES_ECB:
2835 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2836 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2837 session->cipher_alg);
2840 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2841 session->cipher_alg);
2849 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2850 struct rte_security_session_conf *conf,
2853 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2854 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2855 struct rte_crypto_auth_xform *auth_xform = NULL;
2856 struct rte_crypto_aead_xform *aead_xform = NULL;
2857 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2858 struct ctxt_priv *priv;
2859 struct alginfo authdata, cipherdata;
2861 struct sec_flow_context *flc;
2862 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2865 PMD_INIT_FUNC_TRACE();
2867 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2868 sizeof(struct ctxt_priv) +
2869 sizeof(struct sec_flc_desc),
2870 RTE_CACHE_LINE_SIZE);
2873 DPAA2_SEC_ERR("No memory for priv CTXT");
2877 priv->fle_pool = dev_priv->fle_pool;
2878 flc = &priv->flc_desc[0].flc;
2880 memset(session, 0, sizeof(dpaa2_sec_session));
2882 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2883 cipher_xform = &conf->crypto_xform->cipher;
2884 if (conf->crypto_xform->next)
2885 auth_xform = &conf->crypto_xform->next->auth;
2886 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2887 session, &cipherdata, &authdata);
2888 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2889 auth_xform = &conf->crypto_xform->auth;
2890 if (conf->crypto_xform->next)
2891 cipher_xform = &conf->crypto_xform->next->cipher;
2892 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2893 session, &cipherdata, &authdata);
2894 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2895 aead_xform = &conf->crypto_xform->aead;
2896 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2897 session, &cipherdata);
2898 authdata.keylen = 0;
2899 authdata.algtype = 0;
2901 DPAA2_SEC_ERR("XFORM not specified");
2906 DPAA2_SEC_ERR("Failed to process xform");
2910 session->ctxt_type = DPAA2_SEC_IPSEC;
2911 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2912 uint8_t *hdr = NULL;
2914 struct rte_ipv6_hdr ip6_hdr;
2915 struct ipsec_encap_pdb encap_pdb;
2917 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2918 /* For Sec Proto only one descriptor is required. */
2919 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2921 /* copy algo specific data to PDB */
2922 switch (cipherdata.algtype) {
2923 case OP_PCL_IPSEC_AES_CTR:
2924 encap_pdb.ctr.ctr_initial = 0x00000001;
2925 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2927 case OP_PCL_IPSEC_AES_GCM8:
2928 case OP_PCL_IPSEC_AES_GCM12:
2929 case OP_PCL_IPSEC_AES_GCM16:
2930 memcpy(encap_pdb.gcm.salt,
2931 (uint8_t *)&(ipsec_xform->salt), 4);
2935 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2936 PDBOPTS_ESP_OIHI_PDB_INL |
2938 PDBHMO_ESP_ENCAP_DTTL |
2940 if (ipsec_xform->options.esn)
2941 encap_pdb.options |= PDBOPTS_ESP_ESN;
2942 encap_pdb.spi = ipsec_xform->spi;
2943 session->dir = DIR_ENC;
2944 if (ipsec_xform->tunnel.type ==
2945 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2946 encap_pdb.ip_hdr_len = sizeof(struct ip);
2947 ip4_hdr.ip_v = IPVERSION;
2949 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2950 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2953 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2954 ip4_hdr.ip_p = IPPROTO_ESP;
2956 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2957 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2958 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2959 &ip4_hdr, sizeof(struct ip));
2960 hdr = (uint8_t *)&ip4_hdr;
2961 } else if (ipsec_xform->tunnel.type ==
2962 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2963 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2964 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2965 ((ipsec_xform->tunnel.ipv6.dscp <<
2966 RTE_IPV6_HDR_TC_SHIFT) &
2967 RTE_IPV6_HDR_TC_MASK) |
2968 ((ipsec_xform->tunnel.ipv6.flabel <<
2969 RTE_IPV6_HDR_FL_SHIFT) &
2970 RTE_IPV6_HDR_FL_MASK));
2971 /* Payload length will be updated by HW */
2972 ip6_hdr.payload_len = 0;
2973 ip6_hdr.hop_limits =
2974 ipsec_xform->tunnel.ipv6.hlimit;
2975 ip6_hdr.proto = (ipsec_xform->proto ==
2976 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2977 IPPROTO_ESP : IPPROTO_AH;
2978 memcpy(&ip6_hdr.src_addr,
2979 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2980 memcpy(&ip6_hdr.dst_addr,
2981 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2982 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2983 hdr = (uint8_t *)&ip6_hdr;
2986 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2987 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2988 SHR_WAIT : SHR_SERIAL, &encap_pdb,
2989 hdr, &cipherdata, &authdata);
2990 } else if (ipsec_xform->direction ==
2991 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2992 struct ipsec_decap_pdb decap_pdb;
2994 flc->dhr = SEC_FLC_DHR_INBOUND;
2995 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2996 /* copy algo specific data to PDB */
2997 switch (cipherdata.algtype) {
2998 case OP_PCL_IPSEC_AES_CTR:
2999 decap_pdb.ctr.ctr_initial = 0x00000001;
3000 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3002 case OP_PCL_IPSEC_AES_GCM8:
3003 case OP_PCL_IPSEC_AES_GCM12:
3004 case OP_PCL_IPSEC_AES_GCM16:
3005 memcpy(decap_pdb.gcm.salt,
3006 (uint8_t *)&(ipsec_xform->salt), 4);
3010 decap_pdb.options = (ipsec_xform->tunnel.type ==
3011 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3012 sizeof(struct ip) << 16 :
3013 sizeof(struct rte_ipv6_hdr) << 16;
3014 if (ipsec_xform->options.esn)
3015 decap_pdb.options |= PDBOPTS_ESP_ESN;
3017 if (ipsec_xform->replay_win_sz) {
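/* The requested anti-replay window is rounded up to the next power of two
 * and mapped onto the closest PDB ARS option (ARS32 ... ARS1024); hardware
 * older than SEC era 10 supports at most a 128-entry window.
 */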
3019 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3021 if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3022 DPAA2_SEC_INFO("Max anti-replay window size supported is 128");
3032 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3035 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3038 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3041 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3044 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3048 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3051 session->dir = DIR_DEC;
3052 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3053 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3054 SHR_WAIT : SHR_SERIAL,
3055 &decap_pdb, &cipherdata, &authdata);
3060 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3064 flc->word1_sdl = (uint8_t)bufsize;
3066 /* Enable the stashing control bit */
3067 DPAA2_SET_FLC_RSC(flc);
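/* word2/word3 of the flow context carry the address of this session's Rx
 * queue context (rx_vq) so completed frames are steered back to the right
 * queue pair; the low-order bits ORed in (0x14) carry the stashing
 * configuration that goes with DPAA2_SET_FLC_RSC() above.
 */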
3068 flc->word2_rflc_31_0 = lower_32_bits(
3069 (size_t)&(((struct dpaa2_sec_qp *)
3070 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3071 flc->word3_rflc_63_32 = upper_32_bits(
3072 (size_t)&(((struct dpaa2_sec_qp *)
3073 dev->data->queue_pairs[0])->rx_vq));
3075 /* Set EWS bit i.e. enable write-safe */
3076 DPAA2_SET_FLC_EWS(flc);
3077 /* Set BS = 1 i.e reuse input buffers as output buffers */
3078 DPAA2_SET_FLC_REUSE_BS(flc);
3079 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3080 DPAA2_SET_FLC_REUSE_FF(flc);
3082 session->ctxt = priv;
3086 rte_free(session->auth_key.data);
3087 rte_free(session->cipher_key.data);
3093 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3094 struct rte_security_session_conf *conf,
3097 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3098 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3099 struct rte_crypto_auth_xform *auth_xform = NULL;
3100 struct rte_crypto_cipher_xform *cipher_xform = NULL;
3101 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3102 struct ctxt_priv *priv;
3103 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3104 struct alginfo authdata, cipherdata;
3105 struct alginfo *p_authdata = NULL;
3107 struct sec_flow_context *flc;
3108 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3114 PMD_INIT_FUNC_TRACE();
3116 memset(session, 0, sizeof(dpaa2_sec_session));
3118 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3119 sizeof(struct ctxt_priv) +
3120 sizeof(struct sec_flc_desc),
3121 RTE_CACHE_LINE_SIZE);
3124 DPAA2_SEC_ERR("No memory for priv CTXT");
3128 priv->fle_pool = dev_priv->fle_pool;
3129 flc = &priv->flc_desc[0].flc;
3131 /* find xform types */
3132 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3133 cipher_xform = &xform->cipher;
3134 if (xform->next != NULL) {
3135 session->ext_params.aead_ctxt.auth_cipher_text = true;
3136 auth_xform = &xform->next->auth;
3138 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3139 auth_xform = &xform->auth;
3140 if (xform->next != NULL) {
3141 session->ext_params.aead_ctxt.auth_cipher_text = false;
3142 cipher_xform = &xform->next->cipher;
3145 DPAA2_SEC_ERR("Invalid crypto type");
3149 session->ctxt_type = DPAA2_SEC_PDCP;
3151 session->cipher_key.data = rte_zmalloc(NULL,
3152 cipher_xform->key.length,
3153 RTE_CACHE_LINE_SIZE);
3154 if (session->cipher_key.data == NULL &&
3155 cipher_xform->key.length > 0) {
3156 DPAA2_SEC_ERR("No Memory for cipher key");
3160 session->cipher_key.length = cipher_xform->key.length;
3161 memcpy(session->cipher_key.data, cipher_xform->key.data,
3162 cipher_xform->key.length);
3164 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3166 session->cipher_alg = cipher_xform->algo;
3168 session->cipher_key.data = NULL;
3169 session->cipher_key.length = 0;
3170 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3171 session->dir = DIR_ENC;
3174 session->pdcp.domain = pdcp_xform->domain;
3175 session->pdcp.bearer = pdcp_xform->bearer;
3176 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3177 session->pdcp.sn_size = pdcp_xform->sn_size;
3178 session->pdcp.hfn = pdcp_xform->hfn;
3179 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3180 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3181 /* hfn ovd offset location is stored in iv.offset value */
3183 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3185 cipherdata.key = (size_t)session->cipher_key.data;
3186 cipherdata.keylen = session->cipher_key.length;
3187 cipherdata.key_enc_flags = 0;
3188 cipherdata.key_type = RTA_DATA_IMM;
3190 switch (session->cipher_alg) {
3191 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3192 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3194 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3195 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3197 case RTE_CRYPTO_CIPHER_AES_CTR:
3198 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3200 case RTE_CRYPTO_CIPHER_NULL:
3201 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3204 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3205 session->cipher_alg);
3210 session->auth_key.data = rte_zmalloc(NULL,
3211 auth_xform->key.length,
3212 RTE_CACHE_LINE_SIZE);
3213 if (!session->auth_key.data &&
3214 auth_xform->key.length > 0) {
3215 DPAA2_SEC_ERR("No Memory for auth key");
3216 rte_free(session->cipher_key.data);
3220 session->auth_key.length = auth_xform->key.length;
3221 memcpy(session->auth_key.data, auth_xform->key.data,
3222 auth_xform->key.length);
3223 session->auth_alg = auth_xform->algo;
3225 session->auth_key.data = NULL;
3226 session->auth_key.length = 0;
3227 session->auth_alg = 0;
3229 authdata.key = (size_t)session->auth_key.data;
3230 authdata.keylen = session->auth_key.length;
3231 authdata.key_enc_flags = 0;
3232 authdata.key_type = RTA_DATA_IMM;
3234 if (session->auth_alg) {
3235 switch (session->auth_alg) {
3236 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3237 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3239 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3240 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3242 case RTE_CRYPTO_AUTH_AES_CMAC:
3243 authdata.algtype = PDCP_AUTH_TYPE_AES;
3245 case RTE_CRYPTO_AUTH_NULL:
3246 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3249 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3254 p_authdata = &authdata;
3255 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3256 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
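/* CAAM shared descriptors have limited room for immediate data, so the
 * rta_inline_pdcp*_query() helpers below decide, per key, whether it can
 * stay inline or must be referenced through its IOVA as RTA_DATA_PTR.
 */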
3260 if (pdcp_xform->sdap_enabled) {
3261 int nb_keys_to_inline =
3262 rta_inline_pdcp_sdap_query(authdata.algtype,
3264 session->pdcp.sn_size,
3265 session->pdcp.hfn_ovd);
3266 if (nb_keys_to_inline >= 1) {
3267 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3268 cipherdata.key_type = RTA_DATA_PTR;
3270 if (nb_keys_to_inline >= 2) {
3271 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3272 authdata.key_type = RTA_DATA_PTR;
3275 if (rta_inline_pdcp_query(authdata.algtype,
3277 session->pdcp.sn_size,
3278 session->pdcp.hfn_ovd)) {
3279 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3280 cipherdata.key_type = RTA_DATA_PTR;
3284 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3285 if (session->dir == DIR_ENC)
3286 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3287 priv->flc_desc[0].desc, 1, swap,
3289 session->pdcp.sn_size,
3291 pdcp_xform->pkt_dir,
3292 pdcp_xform->hfn_threshold,
3293 &cipherdata, &authdata,
3295 else if (session->dir == DIR_DEC)
3296 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3297 priv->flc_desc[0].desc, 1, swap,
3299 session->pdcp.sn_size,
3301 pdcp_xform->pkt_dir,
3302 pdcp_xform->hfn_threshold,
3303 &cipherdata, &authdata,
3306 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3307 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3308 1, swap, &authdata);
3310 if (session->dir == DIR_ENC) {
3311 if (pdcp_xform->sdap_enabled)
3312 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3313 priv->flc_desc[0].desc, 1, swap,
3314 session->pdcp.sn_size,
3317 pdcp_xform->pkt_dir,
3318 pdcp_xform->hfn_threshold,
3319 &cipherdata, p_authdata, 0);
3321 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3322 priv->flc_desc[0].desc, 1, swap,
3323 session->pdcp.sn_size,
3326 pdcp_xform->pkt_dir,
3327 pdcp_xform->hfn_threshold,
3328 &cipherdata, p_authdata, 0);
3329 } else if (session->dir == DIR_DEC) {
3330 if (pdcp_xform->sdap_enabled)
3331 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3332 priv->flc_desc[0].desc, 1, swap,
3333 session->pdcp.sn_size,
3336 pdcp_xform->pkt_dir,
3337 pdcp_xform->hfn_threshold,
3338 &cipherdata, p_authdata, 0);
3340 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3341 priv->flc_desc[0].desc, 1, swap,
3342 session->pdcp.sn_size,
3345 pdcp_xform->pkt_dir,
3346 pdcp_xform->hfn_threshold,
3347 &cipherdata, p_authdata, 0);
3352 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3356 /* Enable the stashing control bit */
3357 DPAA2_SET_FLC_RSC(flc);
3358 flc->word2_rflc_31_0 = lower_32_bits(
3359 (size_t)&(((struct dpaa2_sec_qp *)
3360 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3361 flc->word3_rflc_63_32 = upper_32_bits(
3362 (size_t)&(((struct dpaa2_sec_qp *)
3363 dev->data->queue_pairs[0])->rx_vq));
3365 flc->word1_sdl = (uint8_t)bufsize;
3367 /* TODO - check the perf impact or
3368 * align as per descriptor type
3369 * Set EWS bit i.e. enable write-safe
3370 * DPAA2_SET_FLC_EWS(flc);
3373 /* Set BS = 1 i.e reuse input buffers as output buffers */
3374 DPAA2_SET_FLC_REUSE_BS(flc);
3375 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3376 DPAA2_SET_FLC_REUSE_FF(flc);
3378 session->ctxt = priv;
3382 rte_free(session->auth_key.data);
3383 rte_free(session->cipher_key.data);
3389 dpaa2_sec_security_session_create(void *dev,
3390 struct rte_security_session_conf *conf,
3391 struct rte_security_session *sess,
3392 struct rte_mempool *mempool)
3394 void *sess_private_data;
3395 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3398 if (rte_mempool_get(mempool, &sess_private_data)) {
3399 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3403 switch (conf->protocol) {
3404 case RTE_SECURITY_PROTOCOL_IPSEC:
3405 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3408 case RTE_SECURITY_PROTOCOL_MACSEC:
3410 case RTE_SECURITY_PROTOCOL_PDCP:
3411 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3418 DPAA2_SEC_ERR("Failed to configure session parameters");
3419 /* Return session to mempool */
3420 rte_mempool_put(mempool, sess_private_data);
3424 set_sec_session_private_data(sess, sess_private_data);
3429 /** Clear the memory of session so it doesn't leave key material behind */
3431 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3432 struct rte_security_session *sess)
3434 PMD_INIT_FUNC_TRACE();
3435 void *sess_priv = get_sec_session_private_data(sess);
3437 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3440 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3443 rte_free(s->cipher_key.data);
3444 rte_free(s->auth_key.data);
3445 memset(s, 0, sizeof(dpaa2_sec_session));
3446 set_sec_session_private_data(sess, NULL);
3447 rte_mempool_put(sess_mp, sess_priv);
3453 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3454 struct rte_crypto_sym_xform *xform,
3455 struct rte_cryptodev_sym_session *sess,
3456 struct rte_mempool *mempool)
3458 void *sess_private_data;
3461 if (rte_mempool_get(mempool, &sess_private_data)) {
3462 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3466 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3468 DPAA2_SEC_ERR("Failed to configure session parameters");
3469 /* Return session to mempool */
3470 rte_mempool_put(mempool, sess_private_data);
3474 set_sym_session_private_data(sess, dev->driver_id,
3480 /** Clear the memory of session so it doesn't leave key material behind */
3482 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3483 struct rte_cryptodev_sym_session *sess)
3485 PMD_INIT_FUNC_TRACE();
3486 uint8_t index = dev->driver_id;
3487 void *sess_priv = get_sym_session_private_data(sess, index);
3488 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3492 rte_free(s->cipher_key.data);
3493 rte_free(s->auth_key.data);
3494 memset(s, 0, sizeof(dpaa2_sec_session));
3495 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3496 set_sym_session_private_data(sess, index, NULL);
3497 rte_mempool_put(sess_mp, sess_priv);
3502 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3503 struct rte_cryptodev_config *config __rte_unused)
3505 PMD_INIT_FUNC_TRACE();
3511 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3513 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3514 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3515 struct dpseci_attr attr;
3516 struct dpaa2_queue *dpaa2_q;
3517 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3518 dev->data->queue_pairs;
3519 struct dpseci_rx_queue_attr rx_attr;
3520 struct dpseci_tx_queue_attr tx_attr;
3523 PMD_INIT_FUNC_TRACE();
3525 memset(&attr, 0, sizeof(struct dpseci_attr));
3527 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3529 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3531 goto get_attr_failure;
3533 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3535 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3536 goto get_attr_failure;
3538 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3539 dpaa2_q = &qp[i]->rx_vq;
3540 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3542 dpaa2_q->fqid = rx_attr.fqid;
3543 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3545 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3546 dpaa2_q = &qp[i]->tx_vq;
3547 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3549 dpaa2_q->fqid = tx_attr.fqid;
3550 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3555 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3560 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3562 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3563 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3566 PMD_INIT_FUNC_TRACE();
3568 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3570 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3575 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3577 DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %x", ret);
3583 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3585 PMD_INIT_FUNC_TRACE();
3591 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3592 struct rte_cryptodev_info *info)
3594 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3596 PMD_INIT_FUNC_TRACE();
3598 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3599 info->feature_flags = dev->feature_flags;
3600 info->capabilities = dpaa2_sec_capabilities;
3601 /* No limit of number of sessions */
3602 info->sym.max_nb_sessions = 0;
3603 info->driver_id = cryptodev_driver_id;
3608 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3609 struct rte_cryptodev_stats *stats)
3611 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3612 struct fsl_mc_io dpseci;
3613 struct dpseci_sec_counters counters = {0};
3614 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3615 dev->data->queue_pairs;
3618 PMD_INIT_FUNC_TRACE();
3619 if (stats == NULL) {
3620 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3623 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3624 if (qp == NULL || qp[i] == NULL) {
3625 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3629 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3630 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3631 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3632 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3635 /* If a secondary process accesses stats, the MCP portal in priv->hw
3636 * may hold the primary process address. Need the secondary process's
3637 * own MCP portal address for this object.
3639 dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3640 ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3643 DPAA2_SEC_ERR("Reading SEC counters failed");
3645 DPAA2_SEC_INFO("dpseci hardware stats:"
3646 "\n\tNum of Requests Dequeued = %" PRIu64
3647 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3648 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3649 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3650 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3651 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3652 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3653 counters.dequeued_requests,
3654 counters.ob_enc_requests,
3655 counters.ib_dec_requests,
3656 counters.ob_enc_bytes,
3657 counters.ob_prot_bytes,
3658 counters.ib_dec_bytes,
3659 counters.ib_valid_bytes);
3664 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3667 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3668 (dev->data->queue_pairs);
3670 PMD_INIT_FUNC_TRACE();
3672 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3673 if (qp[i] == NULL) {
3674 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3677 qp[i]->tx_vq.rx_pkts = 0;
3678 qp[i]->tx_vq.tx_pkts = 0;
3679 qp[i]->tx_vq.err_pkts = 0;
3680 qp[i]->rx_vq.rx_pkts = 0;
3681 qp[i]->rx_vq.tx_pkts = 0;
3682 qp[i]->rx_vq.err_pkts = 0;
3686 static void __rte_hot
3687 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3688 const struct qbman_fd *fd,
3689 const struct qbman_result *dq,
3690 struct dpaa2_queue *rxq,
3691 struct rte_event *ev)
3693 /* Prefetching mbuf */
3694 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3695 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3697 /* Prefetch the crypto_op stored in the mbuf private data area */
3698 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3700 ev->flow_id = rxq->ev.flow_id;
3701 ev->sub_event_type = rxq->ev.sub_event_type;
3702 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3703 ev->op = RTE_EVENT_OP_NEW;
3704 ev->sched_type = rxq->ev.sched_type;
3705 ev->queue_id = rxq->ev.queue_id;
3706 ev->priority = rxq->ev.priority;
3707 ev->event_ptr = sec_fd_to_mbuf(fd);
3709 qbman_swp_dqrr_consume(swp, dq);
3712 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3713 const struct qbman_fd *fd,
3714 const struct qbman_result *dq,
3715 struct dpaa2_queue *rxq,
3716 struct rte_event *ev)
3719 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3720 /* Prefetching mbuf */
3721 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3722 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3724 /* Prefetch the crypto_op stored in the mbuf private data area */
3725 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3727 ev->flow_id = rxq->ev.flow_id;
3728 ev->sub_event_type = rxq->ev.sub_event_type;
3729 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3730 ev->op = RTE_EVENT_OP_NEW;
3731 ev->sched_type = rxq->ev.sched_type;
3732 ev->queue_id = rxq->ev.queue_id;
3733 ev->priority = rxq->ev.priority;
3735 ev->event_ptr = sec_fd_to_mbuf(fd);
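/* Atomic scheduling: the DQRR entry is held rather than consumed here; its
 * index + 1 is stashed in the mbuf sequence number so the entry can be
 * released later when the operation is enqueued/consumed, and the per-lcore
 * bitmap below tracks which DQRR slots are currently held.
 */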
3736 dqrr_index = qbman_get_dqrr_idx(dq);
3737 *dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
3738 DPAA2_PER_LCORE_DQRR_SIZE++;
3739 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3740 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3744 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3746 struct dpaa2_dpcon_dev *dpcon,
3747 const struct rte_event *event)
3749 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3750 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3751 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3752 struct dpseci_rx_queue_cfg cfg;
3756 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3757 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3758 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3759 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
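/* Scale the eventdev priority onto the dpcon's priority range
 * (0 .. num_priorities - 1) before programming the Rx queue destination.
 */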
3763 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3764 (dpcon->num_priorities - 1);
3766 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3767 cfg.options = DPSECI_QUEUE_OPT_DEST;
3768 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3769 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3770 cfg.dest_cfg.priority = priority;
3772 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3773 cfg.user_ctx = (size_t)(qp);
3774 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3775 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3776 cfg.order_preservation_en = 1;
3778 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3781 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3785 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3791 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3794 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3795 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3796 struct dpseci_rx_queue_cfg cfg;
3799 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3800 cfg.options = DPSECI_QUEUE_OPT_DEST;
3801 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3803 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3806 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3811 static struct rte_cryptodev_ops crypto_ops = {
3812 .dev_configure = dpaa2_sec_dev_configure,
3813 .dev_start = dpaa2_sec_dev_start,
3814 .dev_stop = dpaa2_sec_dev_stop,
3815 .dev_close = dpaa2_sec_dev_close,
3816 .dev_infos_get = dpaa2_sec_dev_infos_get,
3817 .stats_get = dpaa2_sec_stats_get,
3818 .stats_reset = dpaa2_sec_stats_reset,
3819 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3820 .queue_pair_release = dpaa2_sec_queue_pair_release,
3821 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3822 .sym_session_configure = dpaa2_sec_sym_session_configure,
3823 .sym_session_clear = dpaa2_sec_sym_session_clear,
3824 /* Raw data-path API related operations */
3825 .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
3826 .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
3829 #ifdef RTE_LIB_SECURITY
3830 static const struct rte_security_capability *
3831 dpaa2_sec_capabilities_get(void *device __rte_unused)
3833 return dpaa2_sec_security_cap;
3836 static const struct rte_security_ops dpaa2_sec_security_ops = {
3837 .session_create = dpaa2_sec_security_session_create,
3838 .session_update = NULL,
3839 .session_stats_get = NULL,
3840 .session_destroy = dpaa2_sec_security_session_destroy,
3841 .set_pkt_metadata = NULL,
3842 .capabilities_get = dpaa2_sec_capabilities_get
3847 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3849 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3850 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3853 PMD_INIT_FUNC_TRACE();
3855 /* Function is reverse of dpaa2_sec_dev_init.
3856 * It does the following:
3857 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3858 * 2. Close the DPSECI device
3859 * 3. Free the allocated resources.
3862 /* Close the device at the underlying layer */
3863 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3865 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3869 /* Free the allocated memory for crypto device private data and dpseci */
3872 rte_free(dev->security_ctx);
3873 rte_mempool_free(priv->fle_pool);
3875 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3876 dev->data->name, rte_socket_id());
3882 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3884 struct dpaa2_sec_dev_private *internals;
3885 struct rte_device *dev = cryptodev->device;
3886 struct rte_dpaa2_device *dpaa2_dev;
3887 #ifdef RTE_LIB_SECURITY
3888 struct rte_security_ctx *security_instance;
3890 struct fsl_mc_io *dpseci;
3892 struct dpseci_attr attr;
3896 PMD_INIT_FUNC_TRACE();
3897 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3898 hw_id = dpaa2_dev->object_id;
3900 cryptodev->driver_id = cryptodev_driver_id;
3901 cryptodev->dev_ops = &crypto_ops;
3903 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3904 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3905 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3906 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3907 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3908 RTE_CRYPTODEV_FF_SECURITY |
3909 RTE_CRYPTODEV_FF_SYM_RAW_DP |
3910 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3911 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3912 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3913 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3914 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3916 internals = cryptodev->data->dev_private;
3919 * For secondary processes, we don't initialise any further as primary
3920 * has already done this work. Only check we don't need a different
3923 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3924 DPAA2_SEC_DEBUG("Device already init by primary process");
3927 #ifdef RTE_LIB_SECURITY
3928 /* Initialize security_ctx only for primary process*/
3929 security_instance = rte_malloc("rte_security_instances_ops",
3930 sizeof(struct rte_security_ctx), 0);
3931 if (security_instance == NULL)
3933 security_instance->device = (void *)cryptodev;
3934 security_instance->ops = &dpaa2_sec_security_ops;
3935 security_instance->sess_cnt = 0;
3936 cryptodev->security_ctx = security_instance;
3938 /* Open the rte device via MC and save the handle for further use */
3939 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3940 sizeof(struct fsl_mc_io), 0);
3943 "Error in allocating the memory for dpsec object");
3946 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3948 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3950 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3954 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3957 "Cannot get dpsec device attributed: Error = %x",
3961 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3964 internals->max_nb_queue_pairs = attr.num_tx_queues;
3965 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3966 internals->hw = dpseci;
3967 internals->token = token;
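/* Create a per-process, per-device FLE pool (the name embeds the PID and
 * cryptodev id) that supplies frame-list-entry memory for the data path.
 */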
3969 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3970 getpid(), cryptodev->data->dev_id);
3971 internals->fle_pool = rte_mempool_create((const char *)str,
3974 FLE_POOL_CACHE_SIZE, 0,
3975 NULL, NULL, NULL, NULL,
3977 if (!internals->fle_pool) {
3978 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3982 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3986 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3988 /* dpaa2_sec_uninit(crypto_dev_name); */
3993 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3994 struct rte_dpaa2_device *dpaa2_dev)
3996 struct rte_cryptodev *cryptodev;
3997 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4001 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4002 dpaa2_dev->object_id);
4004 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4005 if (cryptodev == NULL)
4008 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4009 cryptodev->data->dev_private = rte_zmalloc_socket(
4010 "cryptodev private structure",
4011 sizeof(struct dpaa2_sec_dev_private),
4012 RTE_CACHE_LINE_SIZE,
4015 if (cryptodev->data->dev_private == NULL)
4016 rte_panic("Cannot allocate memzone for private "
4020 dpaa2_dev->cryptodev = cryptodev;
4021 cryptodev->device = &dpaa2_dev->device;
4023 /* init user callbacks */
4024 TAILQ_INIT(&(cryptodev->link_intr_cbs));
4026 if (dpaa2_svr_family == SVR_LX2160A)
4027 rta_set_sec_era(RTA_SEC_ERA_10);
4029 rta_set_sec_era(RTA_SEC_ERA_8);
4031 DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
4033 /* Invoke PMD device initialization function */
4034 retval = dpaa2_sec_dev_init(cryptodev);
4038 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4039 rte_free(cryptodev->data->dev_private);
4041 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4047 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4049 struct rte_cryptodev *cryptodev;
4052 cryptodev = dpaa2_dev->cryptodev;
4053 if (cryptodev == NULL)
4056 ret = dpaa2_sec_uninit(cryptodev);
4060 return rte_cryptodev_pmd_destroy(cryptodev);
4063 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4064 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4065 .drv_type = DPAA2_CRYPTO,
4067 .name = "DPAA2 SEC PMD"
4069 .probe = cryptodev_dpaa2_sec_probe,
4070 .remove = cryptodev_dpaa2_sec_remove,
4073 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4075 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4076 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4077 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4078 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);