/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;
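
/* All FD builders below lay out the frame-list memory the same way: the
 * first FLE is bookkeeping (it stores the rte_crypto_op pointer and the
 * session context so both can be recovered on dequeue), followed by the
 * output FLE, the input FLE, and then any scatter/gather entries:
 *
 *   fle[0]   op pointer + saved ctxt
 *   fle[1]   output FLE (op_fle)
 *   fle[2]   input FLE (ip_fle)
 *   fle[3..] SG entries referenced by op_fle/ip_fle
 */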
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	out_len += sge->length;
	/* o/p segs */
	while (mbuf->next) {
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		out_len += sge->length;
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;
	/* i/p segs */
	while (mbuf->next) {
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
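
/* Contiguous-mbuf variant of the protocol-offload builder: one input FLE
 * covering the source mbuf and one output FLE covering the destination
 * mbuf. The output length is the full buf_len so the SEC block can grow
 * the frame on encap.
 */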
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}
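
/* Build a simple (non-compound) FD for protocol offload when no separate
 * destination mbuf is supplied; the op pointer is parked in
 * mbuf->buf_iova and restored on dequeue by sec_simple_fd_to_mbuf().
 */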
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
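
/* Build a compound FD for AES-GCM on segmented mbufs. On encrypt the
 * digest is appended as an extra output SGE; on decrypt the received ICV
 * is copied past the SG table so the SEC block can verify it.
 */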
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
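
/* Contiguous-mbuf variant of the GCM builder; the FLE pair and the SG
 * entries come from the per-device FLE pool instead of rte_malloc().
 */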
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
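
/* Build a compound FD for chained cipher+auth (authenc) operations on
 * segmented mbufs. auth_only_len is the authenticated-but-not-ciphered
 * region and is passed to hardware through the internal JD bits.
 */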
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
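
/* Contiguous-mbuf variant of the authenc builder, using the FLE pool. */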
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
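
/* Build a compound FD for auth-only operations on segmented mbufs. For
 * SNOW 3G UIA2 and ZUC EIA3, lengths/offsets arrive in bits and the
 * (converted) IV is prepended as the first input SG entry.
 */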
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
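
/* Contiguous-mbuf variant of the auth-only builder. */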
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}
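
/* Build a compound FD for cipher-only operations on segmented mbufs; the
 * IV is carried as the first input SG entry ahead of the payload.
 */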
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
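
/* Contiguous-mbuf variant of the cipher-only builder. */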
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
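
/* Dispatch to the matching FD builder based on the session context type
 * and on whether any mbuf of the operation is segmented.
 */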
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}
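
/* Enqueue crypto ops on the QBMAN portal in bursts of at most
 * dpaa2_eqcr_size frames. Ops whose m_src carries a DQRR index in seqn
 * are enqueued with the DCA flag so the held DQRR entry is consumed on
 * enqueue.
 */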
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index =
					(*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							&flags[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
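
/* Reverse of build_proto_fd(): recover the rte_crypto_op parked in
 * mbuf->buf_iova and shift data_off by the flow-context default header
 * room for the session direction.
 */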
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
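
/* Recover the rte_crypto_op from a compound FD: the op pointer and the
 * session context live in the bookkeeping FLE one entry before the one
 * the FD points at, so step back one FLE from the FD address.
 */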
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		    sess->ctxt_type == DPAA2_SEC_PDCP) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			while (dst->next != NULL) {
				len -= dst->data_len;
				dst = dst->next;
			}
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
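
/* Pull up to nb_ops completed frames from the rx FQ with a volatile
 * dequeue command and convert each FD back to its rte_crypto_op, mapping
 * a non-zero frame context (FRC) to an error status.
 */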
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible for receiving frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -ENOMEM;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -ENOMEM;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
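
/* Prepare a cipher-only session: copy the key and build the shared
 * descriptor for the selected algorithm into the flow context.
 */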
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
					      &cipherdata,
					      session->dir);
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
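
/* Prepare an auth-only session: copy the key and build the shared
 * descriptor (HMAC or SNOW F9) into the DESC_INITFINAL flow context.
 */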
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
		authdata.algmode = OP_ALG_AAI_F9;
		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
					      1, 0, &authdata,
					      !session->dir,
					      session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
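
/* Prepare an AEAD (AES-GCM) session. rta_inline_query() decides whether
 * the key fits inline in the descriptor or must be referenced by pointer.
 */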
2091 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2092 struct rte_crypto_sym_xform *xform,
2093 dpaa2_sec_session *session)
2095 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2096 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2097 struct alginfo aeaddata;
2099 struct ctxt_priv *priv;
2100 struct sec_flow_context *flc;
2101 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2104 PMD_INIT_FUNC_TRACE();
2106 /* Set IV parameters */
2107 session->iv.offset = aead_xform->iv.offset;
2108 session->iv.length = aead_xform->iv.length;
2109 session->ctxt_type = DPAA2_SEC_AEAD;
2111 /* For SEC AEAD only one descriptor is required */
2112 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2113 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2114 RTE_CACHE_LINE_SIZE);
2116 DPAA2_SEC_ERR("No Memory for priv CTXT");
2120 priv->fle_pool = dev_priv->fle_pool;
2121 flc = &priv->flc_desc[0].flc;
2123 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2124 RTE_CACHE_LINE_SIZE);
2125 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2126 DPAA2_SEC_ERR("No Memory for aead key");
2130 memcpy(session->aead_key.data, aead_xform->key.data,
2131 aead_xform->key.length);
2133 session->digest_length = aead_xform->digest_length;
2134 session->aead_key.length = aead_xform->key.length;
2135 ctxt->auth_only_len = aead_xform->aad_length;
2137 aeaddata.key = (size_t)session->aead_key.data;
2138 aeaddata.keylen = session->aead_key.length;
2139 aeaddata.key_enc_flags = 0;
2140 aeaddata.key_type = RTA_DATA_IMM;
2142 switch (aead_xform->algo) {
2143 case RTE_CRYPTO_AEAD_AES_GCM:
2144 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2145 aeaddata.algmode = OP_ALG_AAI_GCM;
2146 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2148 case RTE_CRYPTO_AEAD_AES_CCM:
2149 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2153 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2157 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2160 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2161 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2163 (unsigned int *)priv->flc_desc[0].desc,
2164 &priv->flc_desc[0].desc[1], 1);
2167 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2170 if (priv->flc_desc[0].desc[1] & 1) {
2171 aeaddata.key_type = RTA_DATA_IMM;
2173 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2174 aeaddata.key_type = RTA_DATA_PTR;
2176 priv->flc_desc[0].desc[0] = 0;
2177 priv->flc_desc[0].desc[1] = 0;
2179 if (session->dir == DIR_ENC)
2180 bufsize = cnstr_shdsc_gcm_encap(
2181 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2182 &aeaddata, session->iv.length,
2183 session->digest_length);
2185 bufsize = cnstr_shdsc_gcm_decap(
2186 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2187 &aeaddata, session->iv.length,
2188 session->digest_length);
2190 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2194 flc->word1_sdl = (uint8_t)bufsize;
2195 session->ctxt = priv;
2196 for (i = 0; i < bufsize; i++)
2197 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2198 i, priv->flc_desc[0].desc[i]);
2203 rte_free(session->aead_key.data);
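/*
 * Usage sketch (illustrative only): an AES-128-GCM transform of the kind
 * consumed by dpaa2_sec_aead_init() above. gcm_key and IV_OFFSET are
 * placeholders; the 12-byte IV and 16-byte tag are the algorithm's usual
 * parameters, not values mandated by this driver.
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.next = NULL,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = gcm_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 16,
 *		},
 *	};
 */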
2210 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2211 struct rte_crypto_sym_xform *xform,
2212 dpaa2_sec_session *session)
2214 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2215 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2216 struct alginfo authdata, cipherdata;
2218 struct ctxt_priv *priv;
2219 struct sec_flow_context *flc;
2220 struct rte_crypto_cipher_xform *cipher_xform;
2221 struct rte_crypto_auth_xform *auth_xform;
2224 PMD_INIT_FUNC_TRACE();
2226 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2227 cipher_xform = &xform->cipher;
2228 auth_xform = &xform->next->auth;
2229 session->ctxt_type =
2230 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2231 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2233 cipher_xform = &xform->next->cipher;
2234 auth_xform = &xform->auth;
2235 session->ctxt_type =
2236 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2237 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2240 /* Set IV parameters */
2241 session->iv.offset = cipher_xform->iv.offset;
2242 session->iv.length = cipher_xform->iv.length;
2244 /* For SEC AEAD only one descriptor is required */
2245 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2246 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2247 RTE_CACHE_LINE_SIZE);
2249 DPAA2_SEC_ERR("No Memory for priv CTXT");
2253 priv->fle_pool = dev_priv->fle_pool;
2254 flc = &priv->flc_desc[0].flc;
2256 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2257 RTE_CACHE_LINE_SIZE);
2258 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2259 DPAA2_SEC_ERR("No Memory for cipher key");
2263 session->cipher_key.length = cipher_xform->key.length;
2264 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2265 RTE_CACHE_LINE_SIZE);
2266 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2267 DPAA2_SEC_ERR("No Memory for auth key");
2268 rte_free(session->cipher_key.data);
2272 session->auth_key.length = auth_xform->key.length;
2273 memcpy(session->cipher_key.data, cipher_xform->key.data,
2274 cipher_xform->key.length);
2275 memcpy(session->auth_key.data, auth_xform->key.data,
2276 auth_xform->key.length);
2278 authdata.key = (size_t)session->auth_key.data;
2279 authdata.keylen = session->auth_key.length;
2280 authdata.key_enc_flags = 0;
2281 authdata.key_type = RTA_DATA_IMM;
2283 session->digest_length = auth_xform->digest_length;
2285 switch (auth_xform->algo) {
2286 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2287 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2288 authdata.algmode = OP_ALG_AAI_HMAC;
2289 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2291 case RTE_CRYPTO_AUTH_MD5_HMAC:
2292 authdata.algtype = OP_ALG_ALGSEL_MD5;
2293 authdata.algmode = OP_ALG_AAI_HMAC;
2294 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2296 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2297 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2298 authdata.algmode = OP_ALG_AAI_HMAC;
2299 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2301 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2302 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2303 authdata.algmode = OP_ALG_AAI_HMAC;
2304 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2306 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2307 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2308 authdata.algmode = OP_ALG_AAI_HMAC;
2309 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2311 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2312 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2313 authdata.algmode = OP_ALG_AAI_HMAC;
2314 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2316 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2317 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2318 case RTE_CRYPTO_AUTH_NULL:
2319 case RTE_CRYPTO_AUTH_SHA1:
2320 case RTE_CRYPTO_AUTH_SHA256:
2321 case RTE_CRYPTO_AUTH_SHA512:
2322 case RTE_CRYPTO_AUTH_SHA224:
2323 case RTE_CRYPTO_AUTH_SHA384:
2324 case RTE_CRYPTO_AUTH_MD5:
2325 case RTE_CRYPTO_AUTH_AES_GMAC:
2326 case RTE_CRYPTO_AUTH_KASUMI_F9:
2327 case RTE_CRYPTO_AUTH_AES_CMAC:
2328 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2329 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2330 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2334 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2338 cipherdata.key = (size_t)session->cipher_key.data;
2339 cipherdata.keylen = session->cipher_key.length;
2340 cipherdata.key_enc_flags = 0;
2341 cipherdata.key_type = RTA_DATA_IMM;
2343 switch (cipher_xform->algo) {
2344 case RTE_CRYPTO_CIPHER_AES_CBC:
2345 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2346 cipherdata.algmode = OP_ALG_AAI_CBC;
2347 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2349 case RTE_CRYPTO_CIPHER_3DES_CBC:
2350 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2351 cipherdata.algmode = OP_ALG_AAI_CBC;
2352 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2354 case RTE_CRYPTO_CIPHER_AES_CTR:
2355 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2356 cipherdata.algmode = OP_ALG_AAI_CTR;
2357 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2359 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2360 case RTE_CRYPTO_CIPHER_NULL:
2361 case RTE_CRYPTO_CIPHER_3DES_ECB:
2362 case RTE_CRYPTO_CIPHER_AES_ECB:
2363 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2364 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2365 cipher_xform->algo);
2368 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2369 cipher_xform->algo);
2372 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2375 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2376 priv->flc_desc[0].desc[1] = authdata.keylen;
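/*
 * Same inline-or-pointer decision as in the AEAD path, but for two keys:
 * desc[0] and desc[1] carry the cipher and auth key lengths, and
 * rta_inline_query() returns a bitmask in desc[2] (bit 0 = cipher key,
 * bit 1 = auth key) saying which of them still fits inline in the shared
 * descriptor.
 */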
2377 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2379 (unsigned int *)priv->flc_desc[0].desc,
2380 &priv->flc_desc[0].desc[2], 2);
2383 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2386 if (priv->flc_desc[0].desc[2] & 1) {
2387 cipherdata.key_type = RTA_DATA_IMM;
2389 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2390 cipherdata.key_type = RTA_DATA_PTR;
2392 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2393 authdata.key_type = RTA_DATA_IMM;
2395 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2396 authdata.key_type = RTA_DATA_PTR;
2398 priv->flc_desc[0].desc[0] = 0;
2399 priv->flc_desc[0].desc[1] = 0;
2400 priv->flc_desc[0].desc[2] = 0;
2402 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2403 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2405 &cipherdata, &authdata,
2407 ctxt->auth_only_len,
2408 session->digest_length,
2411 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2415 DPAA2_SEC_ERR("Hash before cipher not supported");
2419 flc->word1_sdl = (uint8_t)bufsize;
2420 session->ctxt = priv;
2421 for (i = 0; i < bufsize; i++)
2422 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2423 i, priv->flc_desc[0].desc[i]);
2428 rte_free(session->cipher_key.data);
2429 rte_free(session->auth_key.data);
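/*
 * Usage sketch (illustrative only): a cipher-then-auth chain of the kind
 * dispatched to dpaa2_sec_aead_chain_init(). The key buffers, lengths and
 * IV_OFFSET are placeholders.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *			.key = { .data = auth_key, .length = 32 },
 *			.digest_length = 16,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */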
2435 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2436 struct rte_crypto_sym_xform *xform, void *sess)
2438 dpaa2_sec_session *session = sess;
2441 PMD_INIT_FUNC_TRACE();
2443 if (unlikely(sess == NULL)) {
2444 DPAA2_SEC_ERR("Invalid session struct");
2448 memset(session, 0, sizeof(dpaa2_sec_session));
2449 /* Default IV length = 0 */
2450 session->iv.length = 0;
2453 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2454 session->ctxt_type = DPAA2_SEC_CIPHER;
2455 ret = dpaa2_sec_cipher_init(dev, xform, session);
2457 /* Authentication Only */
2458 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2459 xform->next == NULL) {
2460 session->ctxt_type = DPAA2_SEC_AUTH;
2461 ret = dpaa2_sec_auth_init(dev, xform, session);
2463 /* Cipher then Authenticate */
2464 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2465 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2466 session->ext_params.aead_ctxt.auth_cipher_text = true;
2467 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2469 /* Authenticate then Cipher */
2470 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2471 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2472 session->ext_params.aead_ctxt.auth_cipher_text = false;
2473 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2475 /* AEAD operation for AES-GCM kind of Algorithms */
2476 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2477 xform->next == NULL) {
2478 ret = dpaa2_sec_aead_init(dev, xform, session);
2481 DPAA2_SEC_ERR("Invalid crypto type");
2489 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2490 dpaa2_sec_session *session,
2491 struct alginfo *aeaddata)
2493 PMD_INIT_FUNC_TRACE();
2495 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2496 RTE_CACHE_LINE_SIZE);
2497 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2498 DPAA2_SEC_ERR("No Memory for aead key");
2501 memcpy(session->aead_key.data, aead_xform->key.data,
2502 aead_xform->key.length);
2504 session->digest_length = aead_xform->digest_length;
2505 session->aead_key.length = aead_xform->key.length;
2507 aeaddata->key = (size_t)session->aead_key.data;
2508 aeaddata->keylen = session->aead_key.length;
2509 aeaddata->key_enc_flags = 0;
2510 aeaddata->key_type = RTA_DATA_IMM;
2512 switch (aead_xform->algo) {
2513 case RTE_CRYPTO_AEAD_AES_GCM:
2514 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2515 aeaddata->algmode = OP_ALG_AAI_GCM;
2516 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2518 case RTE_CRYPTO_AEAD_AES_CCM:
2519 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2520 aeaddata->algmode = OP_ALG_AAI_CCM;
2521 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2524 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2528 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2535 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2536 struct rte_crypto_auth_xform *auth_xform,
2537 dpaa2_sec_session *session,
2538 struct alginfo *cipherdata,
2539 struct alginfo *authdata)
2542 session->cipher_key.data = rte_zmalloc(NULL,
2543 cipher_xform->key.length,
2544 RTE_CACHE_LINE_SIZE);
2545 if (session->cipher_key.data == NULL &&
2546 cipher_xform->key.length > 0) {
2547 DPAA2_SEC_ERR("No Memory for cipher key");
2551 session->cipher_key.length = cipher_xform->key.length;
2552 memcpy(session->cipher_key.data, cipher_xform->key.data,
2553 cipher_xform->key.length);
2554 session->cipher_alg = cipher_xform->algo;
2556 session->cipher_key.data = NULL;
2557 session->cipher_key.length = 0;
2558 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2562 session->auth_key.data = rte_zmalloc(NULL,
2563 auth_xform->key.length,
2564 RTE_CACHE_LINE_SIZE);
2565 if (session->auth_key.data == NULL &&
2566 auth_xform->key.length > 0) {
2567 DPAA2_SEC_ERR("No Memory for auth key");
2570 session->auth_key.length = auth_xform->key.length;
2571 memcpy(session->auth_key.data, auth_xform->key.data,
2572 auth_xform->key.length);
2573 session->auth_alg = auth_xform->algo;
2575 session->auth_key.data = NULL;
2576 session->auth_key.length = 0;
2577 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2580 authdata->key = (size_t)session->auth_key.data;
2581 authdata->keylen = session->auth_key.length;
2582 authdata->key_enc_flags = 0;
2583 authdata->key_type = RTA_DATA_IMM;
2584 switch (session->auth_alg) {
2585 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2586 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2587 authdata->algmode = OP_ALG_AAI_HMAC;
2589 case RTE_CRYPTO_AUTH_MD5_HMAC:
2590 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2591 authdata->algmode = OP_ALG_AAI_HMAC;
2593 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2594 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2595 authdata->algmode = OP_ALG_AAI_HMAC;
2597 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2598 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2599 authdata->algmode = OP_ALG_AAI_HMAC;
2601 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2602 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2603 authdata->algmode = OP_ALG_AAI_HMAC;
2605 case RTE_CRYPTO_AUTH_AES_CMAC:
2606 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2608 case RTE_CRYPTO_AUTH_NULL:
2609 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2611 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2612 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2613 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2614 case RTE_CRYPTO_AUTH_SHA1:
2615 case RTE_CRYPTO_AUTH_SHA256:
2616 case RTE_CRYPTO_AUTH_SHA512:
2617 case RTE_CRYPTO_AUTH_SHA224:
2618 case RTE_CRYPTO_AUTH_SHA384:
2619 case RTE_CRYPTO_AUTH_MD5:
2620 case RTE_CRYPTO_AUTH_AES_GMAC:
2621 case RTE_CRYPTO_AUTH_KASUMI_F9:
2622 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2623 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2624 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2628 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2632 cipherdata->key = (size_t)session->cipher_key.data;
2633 cipherdata->keylen = session->cipher_key.length;
2634 cipherdata->key_enc_flags = 0;
2635 cipherdata->key_type = RTA_DATA_IMM;
2637 switch (session->cipher_alg) {
2638 case RTE_CRYPTO_CIPHER_AES_CBC:
2639 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2640 cipherdata->algmode = OP_ALG_AAI_CBC;
2642 case RTE_CRYPTO_CIPHER_3DES_CBC:
2643 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2644 cipherdata->algmode = OP_ALG_AAI_CBC;
2646 case RTE_CRYPTO_CIPHER_AES_CTR:
2647 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2648 cipherdata->algmode = OP_ALG_AAI_CTR;
2650 case RTE_CRYPTO_CIPHER_NULL:
2651 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2653 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2654 case RTE_CRYPTO_CIPHER_3DES_ECB:
2655 case RTE_CRYPTO_CIPHER_AES_ECB:
2656 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2657 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2658 session->cipher_alg);
2661 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2662 session->cipher_alg);
2669 #ifdef RTE_LIBRTE_SECURITY_TEST
2670 static uint8_t aes_cbc_iv[] = {
2671 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2672 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2676 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2677 struct rte_security_session_conf *conf,
2680 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2681 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2682 struct rte_crypto_auth_xform *auth_xform = NULL;
2683 struct rte_crypto_aead_xform *aead_xform = NULL;
2684 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2685 struct ctxt_priv *priv;
2686 struct ipsec_encap_pdb encap_pdb;
2687 struct ipsec_decap_pdb decap_pdb;
2688 struct alginfo authdata, cipherdata;
2690 struct sec_flow_context *flc;
2691 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2694 PMD_INIT_FUNC_TRACE();
2696 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2697 sizeof(struct ctxt_priv) +
2698 sizeof(struct sec_flc_desc),
2699 RTE_CACHE_LINE_SIZE);
2702 DPAA2_SEC_ERR("No memory for priv CTXT");
2706 priv->fle_pool = dev_priv->fle_pool;
2707 flc = &priv->flc_desc[0].flc;
2709 memset(session, 0, sizeof(dpaa2_sec_session));
2711 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2712 cipher_xform = &conf->crypto_xform->cipher;
2713 if (conf->crypto_xform->next)
2714 auth_xform = &conf->crypto_xform->next->auth;
2715 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2716 session, &cipherdata, &authdata);
2717 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2718 auth_xform = &conf->crypto_xform->auth;
2719 if (conf->crypto_xform->next)
2720 cipher_xform = &conf->crypto_xform->next->cipher;
2721 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2722 session, &cipherdata, &authdata);
2723 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2724 aead_xform = &conf->crypto_xform->aead;
2725 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2726 session, &cipherdata);
2728 DPAA2_SEC_ERR("XFORM not specified");
2733 DPAA2_SEC_ERR("Failed to process xform");
2737 session->ctxt_type = DPAA2_SEC_IPSEC;
2738 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2739 uint8_t *hdr = NULL;
2741 struct rte_ipv6_hdr ip6_hdr;
2743 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2744 /* For Sec Proto only one descriptor is required. */
2745 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2746 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2747 PDBOPTS_ESP_OIHI_PDB_INL |
2749 PDBHMO_ESP_ENCAP_DTTL |
2751 if (ipsec_xform->options.esn)
2752 encap_pdb.options |= PDBOPTS_ESP_ESN;
2753 encap_pdb.spi = ipsec_xform->spi;
2754 session->dir = DIR_ENC;
2755 if (ipsec_xform->tunnel.type ==
2756 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2757 encap_pdb.ip_hdr_len = sizeof(struct ip);
2758 ip4_hdr.ip_v = IPVERSION;
2760 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2761 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2764 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2765 ip4_hdr.ip_p = IPPROTO_ESP;
2767 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2768 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2769 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2770 &ip4_hdr, sizeof(struct ip));
2771 hdr = (uint8_t *)&ip4_hdr;
2772 } else if (ipsec_xform->tunnel.type ==
2773 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2774 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2775 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2776 ((ipsec_xform->tunnel.ipv6.dscp <<
2777 RTE_IPV6_HDR_TC_SHIFT) &
2778 RTE_IPV6_HDR_TC_MASK) |
2779 ((ipsec_xform->tunnel.ipv6.flabel <<
2780 RTE_IPV6_HDR_FL_SHIFT) &
2781 RTE_IPV6_HDR_FL_MASK));
2782 /* Payload length will be updated by HW */
2783 ip6_hdr.payload_len = 0;
2784 ip6_hdr.hop_limits =
2785 ipsec_xform->tunnel.ipv6.hlimit;
2786 ip6_hdr.proto = (ipsec_xform->proto ==
2787 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2788 IPPROTO_ESP : IPPROTO_AH;
2789 memcpy(&ip6_hdr.src_addr,
2790 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2791 memcpy(&ip6_hdr.dst_addr,
2792 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2793 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2794 hdr = (uint8_t *)&ip6_hdr;
2797 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2798 1, 0, SHR_SERIAL, &encap_pdb,
2799 hdr, &cipherdata, &authdata);
2800 } else if (ipsec_xform->direction ==
2801 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2802 flc->dhr = SEC_FLC_DHR_INBOUND;
2803 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2804 decap_pdb.options = sizeof(struct ip) << 16;
2805 if (ipsec_xform->options.esn)
2806 decap_pdb.options |= PDBOPTS_ESP_ESN;
2807 decap_pdb.options = (ipsec_xform->tunnel.type ==
2808 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2809 sizeof(struct ip) << 16 :
2810 sizeof(struct rte_ipv6_hdr) << 16;
2811 session->dir = DIR_DEC;
2812 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2814 &decap_pdb, &cipherdata, &authdata);
2819 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2823 flc->word1_sdl = (uint8_t)bufsize;
2825 /* Enable the stashing control bit */
2826 DPAA2_SET_FLC_RSC(flc);
2827 flc->word2_rflc_31_0 = lower_32_bits(
2828 (size_t)&(((struct dpaa2_sec_qp *)
2829 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2830 flc->word3_rflc_63_32 = upper_32_bits(
2831 (size_t)&(((struct dpaa2_sec_qp *)
2832 dev->data->queue_pairs[0])->rx_vq));
2834 /* Set EWS bit i.e. enable write-safe */
2835 DPAA2_SET_FLC_EWS(flc);
2836 /* Set BS = 1 i.e reuse input buffers as output buffers */
2837 DPAA2_SET_FLC_REUSE_BS(flc);
2838 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2839 DPAA2_SET_FLC_REUSE_FF(flc);
2841 session->ctxt = priv;
2845 rte_free(session->auth_key.data);
2846 rte_free(session->cipher_key.data);
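/*
 * Usage sketch (illustrative only): the kind of rte_security configuration
 * that reaches dpaa2_sec_set_ipsec_session(). The SPI is a placeholder, and
 * aead_xform refers to a separately built AEAD transform such as the
 * AES-GCM sketch earlier in this file.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1234,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &aead_xform,
 *	};
 */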
2852 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2853 struct rte_security_session_conf *conf,
2856 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2857 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2858 struct rte_crypto_auth_xform *auth_xform = NULL;
2859 struct rte_crypto_cipher_xform *cipher_xform;
2860 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2861 struct ctxt_priv *priv;
2862 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2863 struct alginfo authdata, cipherdata;
2864 struct alginfo *p_authdata = NULL;
2866 struct sec_flow_context *flc;
2867 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2873 PMD_INIT_FUNC_TRACE();
2875 memset(session, 0, sizeof(dpaa2_sec_session));
2877 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2878 sizeof(struct ctxt_priv) +
2879 sizeof(struct sec_flc_desc),
2880 RTE_CACHE_LINE_SIZE);
2883 DPAA2_SEC_ERR("No memory for priv CTXT");
2887 priv->fle_pool = dev_priv->fle_pool;
2888 flc = &priv->flc_desc[0].flc;
2890 /* find xform types */
2891 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2892 cipher_xform = &xform->cipher;
2893 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2894 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2895 session->ext_params.aead_ctxt.auth_cipher_text = true;
2896 cipher_xform = &xform->cipher;
2897 auth_xform = &xform->next->auth;
2898 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2899 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2900 session->ext_params.aead_ctxt.auth_cipher_text = false;
2901 cipher_xform = &xform->next->cipher;
2902 auth_xform = &xform->auth;
2904 DPAA2_SEC_ERR("Invalid crypto type");
2908 session->ctxt_type = DPAA2_SEC_PDCP;
2910 session->cipher_key.data = rte_zmalloc(NULL,
2911 cipher_xform->key.length,
2912 RTE_CACHE_LINE_SIZE);
2913 if (session->cipher_key.data == NULL &&
2914 cipher_xform->key.length > 0) {
2915 DPAA2_SEC_ERR("No Memory for cipher key");
2919 session->cipher_key.length = cipher_xform->key.length;
2920 memcpy(session->cipher_key.data, cipher_xform->key.data,
2921 cipher_xform->key.length);
2923 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2925 session->cipher_alg = cipher_xform->algo;
2927 session->cipher_key.data = NULL;
2928 session->cipher_key.length = 0;
2929 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2930 session->dir = DIR_ENC;
2933 session->pdcp.domain = pdcp_xform->domain;
2934 session->pdcp.bearer = pdcp_xform->bearer;
2935 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2936 session->pdcp.sn_size = pdcp_xform->sn_size;
2937 session->pdcp.hfn = pdcp_xform->hfn;
2938 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2939 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2940 /* hfn ovd offset location is stored in iv.offset value */
2941 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2943 cipherdata.key = (size_t)session->cipher_key.data;
2944 cipherdata.keylen = session->cipher_key.length;
2945 cipherdata.key_enc_flags = 0;
2946 cipherdata.key_type = RTA_DATA_IMM;
2948 switch (session->cipher_alg) {
2949 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2950 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2952 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2953 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2955 case RTE_CRYPTO_CIPHER_AES_CTR:
2956 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2958 case RTE_CRYPTO_CIPHER_NULL:
2959 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2962 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2963 session->cipher_alg);
2968 session->auth_key.data = rte_zmalloc(NULL,
2969 auth_xform->key.length,
2970 RTE_CACHE_LINE_SIZE);
2971 if (!session->auth_key.data &&
2972 auth_xform->key.length > 0) {
2973 DPAA2_SEC_ERR("No Memory for auth key");
2974 rte_free(session->cipher_key.data);
2978 session->auth_key.length = auth_xform->key.length;
2979 memcpy(session->auth_key.data, auth_xform->key.data,
2980 auth_xform->key.length);
2981 session->auth_alg = auth_xform->algo;
2983 session->auth_key.data = NULL;
2984 session->auth_key.length = 0;
2985 session->auth_alg = 0;
2987 authdata.key = (size_t)session->auth_key.data;
2988 authdata.keylen = session->auth_key.length;
2989 authdata.key_enc_flags = 0;
2990 authdata.key_type = RTA_DATA_IMM;
2992 if (session->auth_alg) {
2993 switch (session->auth_alg) {
2994 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2995 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2997 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2998 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3000 case RTE_CRYPTO_AUTH_AES_CMAC:
3001 authdata.algtype = PDCP_AUTH_TYPE_AES;
3003 case RTE_CRYPTO_AUTH_NULL:
3004 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3007 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3012 p_authdata = &authdata;
3013 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3014 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3018 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3019 if (session->dir == DIR_ENC)
3020 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3021 priv->flc_desc[0].desc, 1, swap,
3023 session->pdcp.sn_size,
3025 pdcp_xform->pkt_dir,
3026 pdcp_xform->hfn_threshold,
3027 &cipherdata, &authdata,
3029 else if (session->dir == DIR_DEC)
3030 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3031 priv->flc_desc[0].desc, 1, swap,
3033 session->pdcp.sn_size,
3035 pdcp_xform->pkt_dir,
3036 pdcp_xform->hfn_threshold,
3037 &cipherdata, &authdata,
3040 if (session->dir == DIR_ENC)
3041 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3042 priv->flc_desc[0].desc, 1, swap,
3043 session->pdcp.sn_size,
3046 pdcp_xform->pkt_dir,
3047 pdcp_xform->hfn_threshold,
3048 &cipherdata, p_authdata, 0);
3049 else if (session->dir == DIR_DEC)
3050 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3051 priv->flc_desc[0].desc, 1, swap,
3052 session->pdcp.sn_size,
3055 pdcp_xform->pkt_dir,
3056 pdcp_xform->hfn_threshold,
3057 &cipherdata, p_authdata, 0);
3061 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3065 /* Enable the stashing control bit */
3066 DPAA2_SET_FLC_RSC(flc);
3067 flc->word2_rflc_31_0 = lower_32_bits(
3068 (size_t)&(((struct dpaa2_sec_qp *)
3069 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3070 flc->word3_rflc_63_32 = upper_32_bits(
3071 (size_t)&(((struct dpaa2_sec_qp *)
3072 dev->data->queue_pairs[0])->rx_vq));
3074 flc->word1_sdl = (uint8_t)bufsize;
3076 /* TODO - check the perf impact, or
3077  * enable selectively as per descriptor type.
3078  * Set EWS bit i.e. enable write-safe:
3079  * DPAA2_SET_FLC_EWS(flc);
3082 /* Set BS = 1 i.e reuse input buffers as output buffers */
3083 DPAA2_SET_FLC_REUSE_BS(flc);
3084 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3085 DPAA2_SET_FLC_REUSE_FF(flc);
3087 session->ctxt = priv;
3091 rte_free(session->auth_key.data);
3092 rte_free(session->cipher_key.data);
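/*
 * Usage sketch (illustrative only): a PDCP control-plane configuration as
 * consumed by dpaa2_sec_set_pdcp_session(); bearer, HFN and threshold are
 * placeholders, and cipher_auth_chain is a separately built cipher+auth
 * xform chain (c-plane requires integrity, as enforced above).
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.bearer = 1,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.hfn = 0,
 *			.hfn_threshold = 0x7ffff,
 *		},
 *		.crypto_xform = &cipher_auth_chain,
 *	};
 */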
3098 dpaa2_sec_security_session_create(void *dev,
3099 struct rte_security_session_conf *conf,
3100 struct rte_security_session *sess,
3101 struct rte_mempool *mempool)
3103 void *sess_private_data;
3104 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3107 if (rte_mempool_get(mempool, &sess_private_data)) {
3108 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3112 switch (conf->protocol) {
3113 case RTE_SECURITY_PROTOCOL_IPSEC:
3114 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3117 case RTE_SECURITY_PROTOCOL_MACSEC:
3119 case RTE_SECURITY_PROTOCOL_PDCP:
3120 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3127 DPAA2_SEC_ERR("Failed to configure session parameters");
3128 /* Return session to mempool */
3129 rte_mempool_put(mempool, sess_private_data);
3133 set_sec_session_private_data(sess, sess_private_data);
3138 /** Clear the memory of session so it doesn't leave key material behind */
3140 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3141 struct rte_security_session *sess)
3143 PMD_INIT_FUNC_TRACE();
3144 void *sess_priv = get_sec_session_private_data(sess);
3146 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3149 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3152 rte_free(s->cipher_key.data);
3153 rte_free(s->auth_key.data);
3154 memset(s, 0, sizeof(dpaa2_sec_session));
3155 set_sec_session_private_data(sess, NULL);
3156 rte_mempool_put(sess_mp, sess_priv);
3162 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3163 struct rte_crypto_sym_xform *xform,
3164 struct rte_cryptodev_sym_session *sess,
3165 struct rte_mempool *mempool)
3167 void *sess_private_data;
3170 if (rte_mempool_get(mempool, &sess_private_data)) {
3171 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3175 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3177 DPAA2_SEC_ERR("Failed to configure session parameters");
3178 /* Return session to mempool */
3179 rte_mempool_put(mempool, sess_private_data);
3183 set_sym_session_private_data(sess, dev->driver_id,
3189 /** Clear the memory of session so it doesn't leave key material behind */
3191 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3192 struct rte_cryptodev_sym_session *sess)
3194 PMD_INIT_FUNC_TRACE();
3195 uint8_t index = dev->driver_id;
3196 void *sess_priv = get_sym_session_private_data(sess, index);
3197 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3201 rte_free(s->cipher_key.data);
3202 rte_free(s->auth_key.data);
3203 memset(s, 0, sizeof(dpaa2_sec_session));
3204 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3205 set_sym_session_private_data(sess, index, NULL);
3206 rte_mempool_put(sess_mp, sess_priv);
3211 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3212 struct rte_cryptodev_config *config __rte_unused)
3214 PMD_INIT_FUNC_TRACE();
3220 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3222 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3223 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3224 struct dpseci_attr attr;
3225 struct dpaa2_queue *dpaa2_q;
3226 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3227 dev->data->queue_pairs;
3228 struct dpseci_rx_queue_attr rx_attr;
3229 struct dpseci_tx_queue_attr tx_attr;
3232 PMD_INIT_FUNC_TRACE();
3234 memset(&attr, 0, sizeof(struct dpseci_attr));
3236 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3238 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3240 goto get_attr_failure;
3242 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3244 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3245 goto get_attr_failure;
3247 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3248 dpaa2_q = &qp[i]->rx_vq;
3249 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3251 dpaa2_q->fqid = rx_attr.fqid;
3252 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3254 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3255 dpaa2_q = &qp[i]->tx_vq;
3256 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3258 dpaa2_q->fqid = tx_attr.fqid;
3259 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3264 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3269 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3271 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3272 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3275 PMD_INIT_FUNC_TRACE();
3277 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3279 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3284 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3286 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3292 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3294 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3295 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3298 PMD_INIT_FUNC_TRACE();
3300 /* Function is reverse of dpaa2_sec_dev_init.
3301 * It does the following:
3302 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3303 * 2. Close the DPSECI device
3304 * 3. Free the allocated resources.
3307 /* Close the device at the underlying layer */
3308 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3310 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3314 /* Free the memory allocated for the crypto private data and dpseci */
3322 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3323 struct rte_cryptodev_info *info)
3325 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3327 PMD_INIT_FUNC_TRACE();
3329 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3330 info->feature_flags = dev->feature_flags;
3331 info->capabilities = dpaa2_sec_capabilities;
3332 /* No limit of number of sessions */
3333 info->sym.max_nb_sessions = 0;
3334 info->driver_id = cryptodev_driver_id;
3339 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3340 struct rte_cryptodev_stats *stats)
3342 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3343 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3344 struct dpseci_sec_counters counters = {0};
3345 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3346 dev->data->queue_pairs;
3349 PMD_INIT_FUNC_TRACE();
3350 if (stats == NULL) {
3351 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3354 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3355 if (qp[i] == NULL) {
3356 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3360 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3361 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3362 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3363 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3366 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3369 DPAA2_SEC_ERR("SEC counters failed");
3371 DPAA2_SEC_INFO("dpseci hardware stats:"
3372 "\n\tNum of Requests Dequeued = %" PRIu64
3373 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3374 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3375 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3376 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3377 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3378 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3379 counters.dequeued_requests,
3380 counters.ob_enc_requests,
3381 counters.ib_dec_requests,
3382 counters.ob_enc_bytes,
3383 counters.ob_prot_bytes,
3384 counters.ib_dec_bytes,
3385 counters.ib_valid_bytes);
3390 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3393 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3394 (dev->data->queue_pairs);
3396 PMD_INIT_FUNC_TRACE();
3398 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3399 if (qp[i] == NULL) {
3400 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3403 qp[i]->tx_vq.rx_pkts = 0;
3404 qp[i]->tx_vq.tx_pkts = 0;
3405 qp[i]->tx_vq.err_pkts = 0;
3406 qp[i]->rx_vq.rx_pkts = 0;
3407 qp[i]->rx_vq.tx_pkts = 0;
3408 qp[i]->rx_vq.err_pkts = 0;
3412 static void __attribute__((hot))
3413 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3414 const struct qbman_fd *fd,
3415 const struct qbman_result *dq,
3416 struct dpaa2_queue *rxq,
3417 struct rte_event *ev)
3419 /* Prefetching mbuf */
3420 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3421 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3423 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3424 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3426 ev->flow_id = rxq->ev.flow_id;
3427 ev->sub_event_type = rxq->ev.sub_event_type;
3428 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3429 ev->op = RTE_EVENT_OP_NEW;
3430 ev->sched_type = rxq->ev.sched_type;
3431 ev->queue_id = rxq->ev.queue_id;
3432 ev->priority = rxq->ev.priority;
3433 ev->event_ptr = sec_fd_to_mbuf(fd);
3435 qbman_swp_dqrr_consume(swp, dq);
3438 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3439 const struct qbman_fd *fd,
3440 const struct qbman_result *dq,
3441 struct dpaa2_queue *rxq,
3442 struct rte_event *ev)
3445 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3446 /* Prefetching mbuf */
3447 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3448 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3450 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3451 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3453 ev->flow_id = rxq->ev.flow_id;
3454 ev->sub_event_type = rxq->ev.sub_event_type;
3455 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3456 ev->op = RTE_EVENT_OP_NEW;
3457 ev->sched_type = rxq->ev.sched_type;
3458 ev->queue_id = rxq->ev.queue_id;
3459 ev->priority = rxq->ev.priority;
3461 ev->event_ptr = sec_fd_to_mbuf(fd);
3462 dqrr_index = qbman_get_dqrr_idx(dq);
3463 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3464 DPAA2_PER_LCORE_DQRR_SIZE++;
3465 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3466 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
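/*
 * Unlike the parallel handler, the DQRR entry is deliberately not consumed
 * here: its index (stored +1 in seqn so that 0 can mean "none") and the
 * per-lcore held-mask let the entry be released only once the application
 * is done with the packet, which is what preserves atomic ordering.
 */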
3470 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3473 const struct rte_event *event)
3475 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3476 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3477 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3478 struct dpseci_rx_queue_cfg cfg;
3481 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3482 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3483 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3484 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3488 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3489 cfg.options = DPSECI_QUEUE_OPT_DEST;
3490 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3491 cfg.dest_cfg.dest_id = dpcon_id;
3492 cfg.dest_cfg.priority = event->priority;
3494 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3495 cfg.user_ctx = (size_t)(qp);
3496 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3497 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3498 cfg.order_preservation_en = 1;
3500 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3503 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3507 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
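/*
 * Usage sketch (illustrative only): the event template a caller would bind
 * this queue pair with; qp_id and dpcon_id stand for whatever the event and
 * crypto plumbing has allocated. RTE_SCHED_TYPE_PARALLEL selects the
 * parallel handler above, RTE_SCHED_TYPE_ATOMIC the atomic one.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *	ret = dpaa2_sec_eventq_attach(cryptodev, qp_id, dpcon_id, &ev);
 */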
3513 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3516 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3517 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3518 struct dpseci_rx_queue_cfg cfg;
3521 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3522 cfg.options = DPSECI_QUEUE_OPT_DEST;
3523 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3525 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3528 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3533 static struct rte_cryptodev_ops crypto_ops = {
3534 .dev_configure = dpaa2_sec_dev_configure,
3535 .dev_start = dpaa2_sec_dev_start,
3536 .dev_stop = dpaa2_sec_dev_stop,
3537 .dev_close = dpaa2_sec_dev_close,
3538 .dev_infos_get = dpaa2_sec_dev_infos_get,
3539 .stats_get = dpaa2_sec_stats_get,
3540 .stats_reset = dpaa2_sec_stats_reset,
3541 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3542 .queue_pair_release = dpaa2_sec_queue_pair_release,
3543 .queue_pair_count = dpaa2_sec_queue_pair_count,
3544 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3545 .sym_session_configure = dpaa2_sec_sym_session_configure,
3546 .sym_session_clear = dpaa2_sec_sym_session_clear,
3549 static const struct rte_security_capability *
3550 dpaa2_sec_capabilities_get(void *device __rte_unused)
3552 return dpaa2_sec_security_cap;
3555 static const struct rte_security_ops dpaa2_sec_security_ops = {
3556 .session_create = dpaa2_sec_security_session_create,
3557 .session_update = NULL,
3558 .session_stats_get = NULL,
3559 .session_destroy = dpaa2_sec_security_session_destroy,
3560 .set_pkt_metadata = NULL,
3561 .capabilities_get = dpaa2_sec_capabilities_get
3565 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3567 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3569 rte_free(dev->security_ctx);
3571 rte_mempool_free(internals->fle_pool);
3573 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3574 dev->data->name, rte_socket_id());
3580 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3582 struct dpaa2_sec_dev_private *internals;
3583 struct rte_device *dev = cryptodev->device;
3584 struct rte_dpaa2_device *dpaa2_dev;
3585 struct rte_security_ctx *security_instance;
3586 struct fsl_mc_io *dpseci;
3588 struct dpseci_attr attr;
3592 PMD_INIT_FUNC_TRACE();
3593 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3594 if (dpaa2_dev == NULL) {
3595 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3598 hw_id = dpaa2_dev->object_id;
3600 cryptodev->driver_id = cryptodev_driver_id;
3601 cryptodev->dev_ops = &crypto_ops;
3603 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3604 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3605 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3606 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3607 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3608 RTE_CRYPTODEV_FF_SECURITY |
3609 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3610 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3611 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3612 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3613 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3615 internals = cryptodev->data->dev_private;
3618 * For secondary processes, we don't initialise any further as primary
3619 * has already done this work. Only check we don't need a different
3622 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3623 DPAA2_SEC_DEBUG("Device already init by primary process");
3627 /* Initialize security_ctx only for primary process*/
3628 security_instance = rte_malloc("rte_security_instances_ops",
3629 sizeof(struct rte_security_ctx), 0);
3630 if (security_instance == NULL)
3632 security_instance->device = (void *)cryptodev;
3633 security_instance->ops = &dpaa2_sec_security_ops;
3634 security_instance->sess_cnt = 0;
3635 cryptodev->security_ctx = security_instance;
3637 /* Open the rte device via MC and save the handle for further use */
3638 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3639 sizeof(struct fsl_mc_io), 0);
3642 "Error in allocating the memory for dpsec object");
3645 dpseci->regs = rte_mcp_ptr_list[0];
3647 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3649 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3653 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3656 "Cannot get dpsec device attributed: Error = %x",
3660 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3663 internals->max_nb_queue_pairs = attr.num_tx_queues;
3664 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3665 internals->hw = dpseci;
3666 internals->token = token;
3668 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3669 getpid(), cryptodev->data->dev_id);
3670 internals->fle_pool = rte_mempool_create((const char *)str,
3673 FLE_POOL_CACHE_SIZE, 0,
3674 NULL, NULL, NULL, NULL,
3676 if (!internals->fle_pool) {
3677 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3681 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3685 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3687 /* dpaa2_sec_uninit(crypto_dev_name); */
3692 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3693 struct rte_dpaa2_device *dpaa2_dev)
3695 struct rte_cryptodev *cryptodev;
3696 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3700 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3701 dpaa2_dev->object_id);
3703 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3704 if (cryptodev == NULL)
3707 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3708 cryptodev->data->dev_private = rte_zmalloc_socket(
3709 "cryptodev private structure",
3710 sizeof(struct dpaa2_sec_dev_private),
3711 RTE_CACHE_LINE_SIZE,
3714 if (cryptodev->data->dev_private == NULL)
3715 rte_panic("Cannot allocate memzone for private "
3719 dpaa2_dev->cryptodev = cryptodev;
3720 cryptodev->device = &dpaa2_dev->device;
3722 /* init user callbacks */
3723 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3725 if (dpaa2_svr_family == SVR_LX2160A)
3726 rta_set_sec_era(RTA_SEC_ERA_10);
3728 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
3730 /* Invoke PMD device initialization function */
3731 retval = dpaa2_sec_dev_init(cryptodev);
3735 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3736 rte_free(cryptodev->data->dev_private);
3738 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3744 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3746 struct rte_cryptodev *cryptodev;
3749 cryptodev = dpaa2_dev->cryptodev;
3750 if (cryptodev == NULL)
3753 ret = dpaa2_sec_uninit(cryptodev);
3757 return rte_cryptodev_pmd_destroy(cryptodev);
3760 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3761 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3762 .drv_type = DPAA2_CRYPTO,
3764 .name = "DPAA2 SEC PMD"
3766 .probe = cryptodev_dpaa2_sec_probe,
3767 .remove = cryptodev_dpaa2_sec_remove,
3770 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3772 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3773 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3774 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3776 RTE_INIT(dpaa2_sec_init_log)
3778 /* Crypto PMD level logs */
3779 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3780 if (dpaa2_logtype_sec >= 0)
3781 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
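/*
 * Note: the log type is registered at NOTICE level; the more verbose driver
 * output (e.g. the DESC[i] dumps above) can be enabled at runtime with the
 * EAL option --log-level=pmd.crypto.dpaa2,8 (8 = RTE_LOG_DEBUG).
 */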