/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0
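
/*
 * Note: SEC_FLC_DHR_* above are the data head-room deltas that the dequeue
 * path (sec_simple_fd_to_mbuf()) applies to mbuf->data_off after protocol
 * offload - negative for outbound/encap frames, zero for inbound/decap.
 */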

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

#ifdef RTE_LIBRTE_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGEs for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		mbuf = mbuf->next;
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGEs for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;
	/* i/p segs */
	while (mbuf->next) {
		mbuf = mbuf->next;
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

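/*
 * Build a compound FD for protocol (IPsec/PDCP) offload on contiguous
 * mbufs: one output FLE pointing at the destination buffer and one input
 * FLE pointing at the source, both drawn from the per-device FLE pool.
 */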
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

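/*
 * Build a simple (single-buffer) FD for protocol offload. The op pointer
 * is stashed in mbuf->buf_iova (the original value is parked in the unused
 * aead.digest.phys_addr field) and restored by sec_simple_fd_to_mbuf().
 */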
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

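/*
 * Build a compound FD for AES-GCM over scatter/gather mbufs: the output
 * frame list carries the cipher text (plus ICV on encrypt), the input list
 * carries IV, optional AAD, the payload segments and, on decrypt, a copy
 * of the received ICV for in-SEC verification.
 */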
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

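/* Single-segment AES-GCM variant; FLEs and SGEs come from the FLE pool. */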
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

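/*
 * Build a compound FD for a cipher+auth chain over scatter/gather mbufs.
 * auth_only_len packs the auth-only header length (low 16 bits) and the
 * auth-only tail length (high 16 bits) for the shared descriptor.
 */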
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

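/* Single-segment cipher+auth chain variant of the FD builder. */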
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				sess->digest_length +
				sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

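/*
 * Build a compound FD for auth-only (hash/MAC) over scatter/gather mbufs.
 * For SNOW3G UIA2 and ZUC EIA3 the API expresses length/offset in bits,
 * hence the byte-alignment check and the >> 3 conversions below.
 */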
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

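/* Single-segment auth-only variant of the FD builder. */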
static int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

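/*
 * Build a compound FD for cipher-only over scatter/gather mbufs; the input
 * frame list is the IV followed by the payload segments.
 */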
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;

	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

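/* Single-segment cipher-only variant of the FD builder. */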
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
	    sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

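/*
 * Translate one crypto op into a frame descriptor: pick the session, then
 * dispatch to the scatter/gather builder if src or dst is segmented, else
 * to the contiguous-buffer one.
 */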
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIBRTE_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -ENOTSUP;

	/* If any of the buffers is segmented */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
	    ((op->sym->m_dst != NULL) &&
	     !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIBRTE_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}

	return ret;
}

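/*
 * Enqueue a burst of ops to the QP's TX FQ, at most dpaa2_eqcr_size frames
 * per loop iteration, retrying the portal until every prepared FD is
 * consumed.
 */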
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to a given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*ops)->sym->m_src->seqn) {
				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							   &fd_arr[loop],
							   &flags[loop],
							   frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

#ifdef RTE_LIBRTE_SECURITY
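/*
 * Recover the crypto op from a simple (non-compound) FD produced by the
 * protocol-offload path; undoes the buf_iova trick from build_proto_fd()
 * and fixes up the mbuf length/head-room for the processed frame.
 */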
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIBRTE_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIBRTE_SECURITY
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		    sess->ctxt_type == DPAA2_SEC_PDCP) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			while (dst->next != NULL) {
				len -= dst->data_len;
				dst = dst->next;
			}
			dst->data_len = len;
		}
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}

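/*
 * Dequeue up to nb_ops processed frames from the QP's RX FQ via one
 * volatile pull command, converting each FD back to its crypto op and
 * mapping the SEC frame status into the op status.
 */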
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible for receiving frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * The SWP also seems to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -ENOMEM;
	}

	qp->rx_vq.crypto_data = dev->data;
	qp->tx_vq.crypto_data = dev->data;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -ENOMEM;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -ENOMEM;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

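/*
 * Session-configuration helpers: each builds the key/alg descriptor data
 * and constructs the shared descriptor into the flow context (FLC) that
 * the FD builders above reference.
 */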
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
					      &cipherdata,
					      session->dir);
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
					   &cipherdata,
					   session->dir);
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
#ifdef CAAM_DESC_DEBUG
	int i;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
#endif
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -EINVAL;
}

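/* Configure an auth-only session (HMAC-*, SNOW3G UIA2, ZUC EIA3). */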
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, SHR_NEVER, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
		authdata.algmode = OP_ALG_AAI_F9;
		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
					      1, 0, &authdata,
					      !session->dir,
					      session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
		authdata.algmode = OP_ALG_AAI_F9;
		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
		session->iv.offset = xform->auth.iv.offset;
		session->iv.length = xform->auth.iv.length;
		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
					   1, 0, &authdata,
					   !session->dir,
					   session->digest_length);
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
#ifdef CAAM_DESC_DEBUG
	int i;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
#endif
	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -EINVAL;
}

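/* Configure an AEAD (AES-GCM) session. */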
2114 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2115 struct rte_crypto_sym_xform *xform,
2116 dpaa2_sec_session *session)
2118 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2119 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2120 struct alginfo aeaddata;
2122 struct ctxt_priv *priv;
2123 struct sec_flow_context *flc;
2124 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2127 PMD_INIT_FUNC_TRACE();
2129 /* Set IV parameters */
2130 session->iv.offset = aead_xform->iv.offset;
2131 session->iv.length = aead_xform->iv.length;
2132 session->ctxt_type = DPAA2_SEC_AEAD;
2134 /* For SEC AEAD only one descriptor is required */
2135 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2136 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2137 RTE_CACHE_LINE_SIZE);
2139 DPAA2_SEC_ERR("No Memory for priv CTXT");
2143 priv->fle_pool = dev_priv->fle_pool;
2144 flc = &priv->flc_desc[0].flc;
2146 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2147 RTE_CACHE_LINE_SIZE);
2148 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2149 DPAA2_SEC_ERR("No Memory for aead key");
2153 memcpy(session->aead_key.data, aead_xform->key.data,
2154 aead_xform->key.length);
2156 session->digest_length = aead_xform->digest_length;
2157 session->aead_key.length = aead_xform->key.length;
2158 ctxt->auth_only_len = aead_xform->aad_length;
2160 aeaddata.key = (size_t)session->aead_key.data;
2161 aeaddata.keylen = session->aead_key.length;
2162 aeaddata.key_enc_flags = 0;
2163 aeaddata.key_type = RTA_DATA_IMM;
2165 switch (aead_xform->algo) {
2166 case RTE_CRYPTO_AEAD_AES_GCM:
2167 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2168 aeaddata.algmode = OP_ALG_AAI_GCM;
2169 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2171 case RTE_CRYPTO_AEAD_AES_CCM:
2172 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2176 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2180 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2183 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2184 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2186 (unsigned int *)priv->flc_desc[0].desc,
2187 &priv->flc_desc[0].desc[1], 1);
2190 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2193 if (priv->flc_desc[0].desc[1] & 1) {
2194 aeaddata.key_type = RTA_DATA_IMM;
2196 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2197 aeaddata.key_type = RTA_DATA_PTR;
2199 priv->flc_desc[0].desc[0] = 0;
2200 priv->flc_desc[0].desc[1] = 0;
2202 if (session->dir == DIR_ENC)
2203 bufsize = cnstr_shdsc_gcm_encap(
2204 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2205 &aeaddata, session->iv.length,
2206 session->digest_length);
2208 bufsize = cnstr_shdsc_gcm_decap(
2209 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2210 &aeaddata, session->iv.length,
2211 session->digest_length);
2213 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2217 flc->word1_sdl = (uint8_t)bufsize;
2218 session->ctxt = priv;
2219 #ifdef CAAM_DESC_DEBUG
2221 for (i = 0; i < bufsize; i++)
2222 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2223 i, priv->flc_desc[0].desc[i]);
2228 rte_free(session->aead_key.data);
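/*
 * Usage sketch (illustrative): an AES-GCM AEAD transform of the shape that
 * dpaa2_sec_aead_init() above consumes. Field names follow the public
 * struct rte_crypto_aead_xform; key, IV offset and AAD length are example
 * assumptions.
 *
 *	static uint8_t gcm_key[16];	// hypothetical 128-bit key
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.next = NULL,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = gcm_key, .length = sizeof(gcm_key) },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },	// IV_OFFSET is app-defined
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 *
 * Note how .aad_length lands in ctxt->auth_only_len and .iv.length feeds
 * cnstr_shdsc_gcm_encap()/_decap() when the descriptor is built above.
 */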
2235 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2236 struct rte_crypto_sym_xform *xform,
2237 dpaa2_sec_session *session)
2239 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2240 struct alginfo authdata, cipherdata;
2242 struct ctxt_priv *priv;
2243 struct sec_flow_context *flc;
2244 struct rte_crypto_cipher_xform *cipher_xform;
2245 struct rte_crypto_auth_xform *auth_xform;
2248 PMD_INIT_FUNC_TRACE();
2250 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2251 cipher_xform = &xform->cipher;
2252 auth_xform = &xform->next->auth;
2253 session->ctxt_type =
2254 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2255 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2257 cipher_xform = &xform->next->cipher;
2258 auth_xform = &xform->auth;
2259 session->ctxt_type =
2260 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2261 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2264 /* Set IV parameters */
2265 session->iv.offset = cipher_xform->iv.offset;
2266 session->iv.length = cipher_xform->iv.length;
2268 /* For SEC AEAD only one descriptor is required */
2269 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2270 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2271 RTE_CACHE_LINE_SIZE);
2273 DPAA2_SEC_ERR("No Memory for priv CTXT");
2277 priv->fle_pool = dev_priv->fle_pool;
2278 flc = &priv->flc_desc[0].flc;
2280 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2281 RTE_CACHE_LINE_SIZE);
2282 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2283 DPAA2_SEC_ERR("No Memory for cipher key");
2287 session->cipher_key.length = cipher_xform->key.length;
2288 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2289 RTE_CACHE_LINE_SIZE);
2290 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2291 DPAA2_SEC_ERR("No Memory for auth key");
2292 rte_free(session->cipher_key.data);
2296 session->auth_key.length = auth_xform->key.length;
2297 memcpy(session->cipher_key.data, cipher_xform->key.data,
2298 cipher_xform->key.length);
2299 memcpy(session->auth_key.data, auth_xform->key.data,
2300 auth_xform->key.length);
2302 authdata.key = (size_t)session->auth_key.data;
2303 authdata.keylen = session->auth_key.length;
2304 authdata.key_enc_flags = 0;
2305 authdata.key_type = RTA_DATA_IMM;
2307 session->digest_length = auth_xform->digest_length;
2309 switch (auth_xform->algo) {
2310 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2311 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2312 authdata.algmode = OP_ALG_AAI_HMAC;
2313 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2315 case RTE_CRYPTO_AUTH_MD5_HMAC:
2316 authdata.algtype = OP_ALG_ALGSEL_MD5;
2317 authdata.algmode = OP_ALG_AAI_HMAC;
2318 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2320 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2321 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2322 authdata.algmode = OP_ALG_AAI_HMAC;
2323 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2325 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2326 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2327 authdata.algmode = OP_ALG_AAI_HMAC;
2328 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2330 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2331 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2332 authdata.algmode = OP_ALG_AAI_HMAC;
2333 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2335 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2336 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2337 authdata.algmode = OP_ALG_AAI_HMAC;
2338 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2340 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2341 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2342 case RTE_CRYPTO_AUTH_NULL:
2343 case RTE_CRYPTO_AUTH_SHA1:
2344 case RTE_CRYPTO_AUTH_SHA256:
2345 case RTE_CRYPTO_AUTH_SHA512:
2346 case RTE_CRYPTO_AUTH_SHA224:
2347 case RTE_CRYPTO_AUTH_SHA384:
2348 case RTE_CRYPTO_AUTH_MD5:
2349 case RTE_CRYPTO_AUTH_AES_GMAC:
2350 case RTE_CRYPTO_AUTH_KASUMI_F9:
2351 case RTE_CRYPTO_AUTH_AES_CMAC:
2352 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2353 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2354 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2358 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2362 cipherdata.key = (size_t)session->cipher_key.data;
2363 cipherdata.keylen = session->cipher_key.length;
2364 cipherdata.key_enc_flags = 0;
2365 cipherdata.key_type = RTA_DATA_IMM;
2367 switch (cipher_xform->algo) {
2368 case RTE_CRYPTO_CIPHER_AES_CBC:
2369 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2370 cipherdata.algmode = OP_ALG_AAI_CBC;
2371 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2373 case RTE_CRYPTO_CIPHER_3DES_CBC:
2374 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2375 cipherdata.algmode = OP_ALG_AAI_CBC;
2376 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2378 case RTE_CRYPTO_CIPHER_AES_CTR:
2379 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2380 cipherdata.algmode = OP_ALG_AAI_CTR;
2381 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2383 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2384 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2385 case RTE_CRYPTO_CIPHER_NULL:
2386 case RTE_CRYPTO_CIPHER_3DES_ECB:
2387 case RTE_CRYPTO_CIPHER_AES_ECB:
2388 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2389 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2390 cipher_xform->algo);
2393 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2394 cipher_xform->algo);
2397 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2400 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2401 priv->flc_desc[0].desc[1] = authdata.keylen;
2402 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2404 (unsigned int *)priv->flc_desc[0].desc,
2405 &priv->flc_desc[0].desc[2], 2);
2408 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2411 if (priv->flc_desc[0].desc[2] & 1) {
2412 cipherdata.key_type = RTA_DATA_IMM;
2414 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2415 cipherdata.key_type = RTA_DATA_PTR;
2417 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2418 authdata.key_type = RTA_DATA_IMM;
2420 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2421 authdata.key_type = RTA_DATA_PTR;
2423 priv->flc_desc[0].desc[0] = 0;
2424 priv->flc_desc[0].desc[1] = 0;
2425 priv->flc_desc[0].desc[2] = 0;
2427 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2428 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2430 &cipherdata, &authdata,
2432 session->digest_length,
2435 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2439 DPAA2_SEC_ERR("Hash before cipher not supported");
2443 flc->word1_sdl = (uint8_t)bufsize;
2444 session->ctxt = priv;
2445 #ifdef CAAM_DESC_DEBUG
2447 for (i = 0; i < bufsize; i++)
2448 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2449 i, priv->flc_desc[0].desc[i]);
2455 rte_free(session->cipher_key.data);
2456 rte_free(session->auth_key.data);
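/*
 * Usage sketch (illustrative): a cipher-then-auth chain of the kind handled
 * by dpaa2_sec_aead_chain_init() above. The two xforms are linked via
 * .next; key buffers and lengths are example assumptions.
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = auth_key, .length = 20 },	// hypothetical
 *			.digest_length = 12,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,		// cipher first, then authenticate
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },	// hypothetical
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *
 * dpaa2_sec_set_session_parameters() below inspects this xform ordering and
 * the cipher op to choose DPAA2_SEC_CIPHER_HASH vs DPAA2_SEC_HASH_CIPHER.
 */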
2462 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2463 struct rte_crypto_sym_xform *xform, void *sess)
2465 dpaa2_sec_session *session = sess;
2468 PMD_INIT_FUNC_TRACE();
2470 if (unlikely(sess == NULL)) {
2471 DPAA2_SEC_ERR("Invalid session struct");
2475 memset(session, 0, sizeof(dpaa2_sec_session));
2476 /* Default IV length = 0 */
2477 session->iv.length = 0;
2480 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2481 session->ctxt_type = DPAA2_SEC_CIPHER;
2482 ret = dpaa2_sec_cipher_init(dev, xform, session);
2484 /* Authentication Only */
2485 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2486 xform->next == NULL) {
2487 session->ctxt_type = DPAA2_SEC_AUTH;
2488 ret = dpaa2_sec_auth_init(dev, xform, session);
2490 /* Cipher then Authenticate */
2491 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2492 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2493 session->ext_params.aead_ctxt.auth_cipher_text = true;
2494 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2496 /* Authenticate then Cipher */
2497 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2498 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2499 session->ext_params.aead_ctxt.auth_cipher_text = false;
2500 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2502 /* AEAD operation for AES-GCM kind of Algorithms */
2503 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2504 xform->next == NULL) {
2505 ret = dpaa2_sec_aead_init(dev, xform, session);
2508 DPAA2_SEC_ERR("Invalid crypto type");
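/*
 * Usage sketch (illustrative, assuming the two-mempool session API of this
 * DPDK era): how an application reaches dpaa2_sec_set_session_parameters()
 * through the public cryptodev calls. dev_id, sess_mp and sess_priv_mp are
 * assumed to be set up by the application.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &cipher,
 *					   sess_priv_mp) < 0)	// &cipher: any xform chain above
 *		rte_exit(EXIT_FAILURE, "SEC session setup failed\n");
 *
 * The _init() call dispatches to dpaa2_sec_sym_session_configure() below,
 * which in turn calls this function.
 */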
2515 #ifdef RTE_LIBRTE_SECURITY
2517 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2518 dpaa2_sec_session *session,
2519 struct alginfo *aeaddata)
2521 PMD_INIT_FUNC_TRACE();
2523 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2524 RTE_CACHE_LINE_SIZE);
2525 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2526 DPAA2_SEC_ERR("No Memory for aead key");
2529 memcpy(session->aead_key.data, aead_xform->key.data,
2530 aead_xform->key.length);
2532 session->digest_length = aead_xform->digest_length;
2533 session->aead_key.length = aead_xform->key.length;
2535 aeaddata->key = (size_t)session->aead_key.data;
2536 aeaddata->keylen = session->aead_key.length;
2537 aeaddata->key_enc_flags = 0;
2538 aeaddata->key_type = RTA_DATA_IMM;
2540 switch (aead_xform->algo) {
2541 case RTE_CRYPTO_AEAD_AES_GCM:
2542 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2543 aeaddata->algmode = OP_ALG_AAI_GCM;
2544 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2546 case RTE_CRYPTO_AEAD_AES_CCM:
2547 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2548 aeaddata->algmode = OP_ALG_AAI_CCM;
2549 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2552 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2556 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2563 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2564 struct rte_crypto_auth_xform *auth_xform,
2565 dpaa2_sec_session *session,
2566 struct alginfo *cipherdata,
2567 struct alginfo *authdata)
2570 session->cipher_key.data = rte_zmalloc(NULL,
2571 cipher_xform->key.length,
2572 RTE_CACHE_LINE_SIZE);
2573 if (session->cipher_key.data == NULL &&
2574 cipher_xform->key.length > 0) {
2575 DPAA2_SEC_ERR("No Memory for cipher key");
2579 session->cipher_key.length = cipher_xform->key.length;
2580 memcpy(session->cipher_key.data, cipher_xform->key.data,
2581 cipher_xform->key.length);
2582 session->cipher_alg = cipher_xform->algo;
2584 session->cipher_key.data = NULL;
2585 session->cipher_key.length = 0;
2586 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2590 session->auth_key.data = rte_zmalloc(NULL,
2591 auth_xform->key.length,
2592 RTE_CACHE_LINE_SIZE);
2593 if (session->auth_key.data == NULL &&
2594 auth_xform->key.length > 0) {
2595 DPAA2_SEC_ERR("No Memory for auth key");
2598 session->auth_key.length = auth_xform->key.length;
2599 memcpy(session->auth_key.data, auth_xform->key.data,
2600 auth_xform->key.length);
2601 session->auth_alg = auth_xform->algo;
2603 session->auth_key.data = NULL;
2604 session->auth_key.length = 0;
2605 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2608 authdata->key = (size_t)session->auth_key.data;
2609 authdata->keylen = session->auth_key.length;
2610 authdata->key_enc_flags = 0;
2611 authdata->key_type = RTA_DATA_IMM;
2612 switch (session->auth_alg) {
2613 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2614 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2615 authdata->algmode = OP_ALG_AAI_HMAC;
2617 case RTE_CRYPTO_AUTH_MD5_HMAC:
2618 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2619 authdata->algmode = OP_ALG_AAI_HMAC;
2621 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2622 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2623 authdata->algmode = OP_ALG_AAI_HMAC;
2625 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2626 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2627 authdata->algmode = OP_ALG_AAI_HMAC;
2629 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2630 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2631 authdata->algmode = OP_ALG_AAI_HMAC;
2633 case RTE_CRYPTO_AUTH_AES_CMAC:
2634 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2636 case RTE_CRYPTO_AUTH_NULL:
2637 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2639 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2640 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2641 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2642 case RTE_CRYPTO_AUTH_SHA1:
2643 case RTE_CRYPTO_AUTH_SHA256:
2644 case RTE_CRYPTO_AUTH_SHA512:
2645 case RTE_CRYPTO_AUTH_SHA224:
2646 case RTE_CRYPTO_AUTH_SHA384:
2647 case RTE_CRYPTO_AUTH_MD5:
2648 case RTE_CRYPTO_AUTH_AES_GMAC:
2649 case RTE_CRYPTO_AUTH_KASUMI_F9:
2650 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2651 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2652 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2656 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2660 cipherdata->key = (size_t)session->cipher_key.data;
2661 cipherdata->keylen = session->cipher_key.length;
2662 cipherdata->key_enc_flags = 0;
2663 cipherdata->key_type = RTA_DATA_IMM;
2665 switch (session->cipher_alg) {
2666 case RTE_CRYPTO_CIPHER_AES_CBC:
2667 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2668 cipherdata->algmode = OP_ALG_AAI_CBC;
2670 case RTE_CRYPTO_CIPHER_3DES_CBC:
2671 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2672 cipherdata->algmode = OP_ALG_AAI_CBC;
2674 case RTE_CRYPTO_CIPHER_AES_CTR:
2675 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2676 cipherdata->algmode = OP_ALG_AAI_CTR;
2678 case RTE_CRYPTO_CIPHER_NULL:
2679 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2681 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2682 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2683 case RTE_CRYPTO_CIPHER_3DES_ECB:
2684 case RTE_CRYPTO_CIPHER_AES_ECB:
2685 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2686 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2687 session->cipher_alg);
2690 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2691 session->cipher_alg);
2698 #ifdef RTE_LIBRTE_SECURITY_TEST
2699 static uint8_t aes_cbc_iv[] = {
2700 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2701 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2705 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2706 struct rte_security_session_conf *conf,
2709 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2710 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2711 struct rte_crypto_auth_xform *auth_xform = NULL;
2712 struct rte_crypto_aead_xform *aead_xform = NULL;
2713 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2714 struct ctxt_priv *priv;
2715 struct ipsec_encap_pdb encap_pdb;
2716 struct ipsec_decap_pdb decap_pdb;
2717 struct alginfo authdata, cipherdata;
2719 struct sec_flow_context *flc;
2720 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2723 PMD_INIT_FUNC_TRACE();
2725 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2726 sizeof(struct ctxt_priv) +
2727 sizeof(struct sec_flc_desc),
2728 RTE_CACHE_LINE_SIZE);
2731 DPAA2_SEC_ERR("No memory for priv CTXT");
2735 priv->fle_pool = dev_priv->fle_pool;
2736 flc = &priv->flc_desc[0].flc;
2738 memset(session, 0, sizeof(dpaa2_sec_session));
2740 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2741 cipher_xform = &conf->crypto_xform->cipher;
2742 if (conf->crypto_xform->next)
2743 auth_xform = &conf->crypto_xform->next->auth;
2744 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2745 session, &cipherdata, &authdata);
2746 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2747 auth_xform = &conf->crypto_xform->auth;
2748 if (conf->crypto_xform->next)
2749 cipher_xform = &conf->crypto_xform->next->cipher;
2750 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2751 session, &cipherdata, &authdata);
2752 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2753 aead_xform = &conf->crypto_xform->aead;
2754 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2755 session, &cipherdata);
2757 DPAA2_SEC_ERR("XFORM not specified");
2762 DPAA2_SEC_ERR("Failed to process xform");
2766 session->ctxt_type = DPAA2_SEC_IPSEC;
2767 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2768 uint8_t *hdr = NULL;
2770 struct rte_ipv6_hdr ip6_hdr;
2772 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2773 /* For Sec Proto only one descriptor is required. */
2774 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2775 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2776 PDBOPTS_ESP_OIHI_PDB_INL |
2778 PDBHMO_ESP_ENCAP_DTTL |
2780 if (ipsec_xform->options.esn)
2781 encap_pdb.options |= PDBOPTS_ESP_ESN;
2782 encap_pdb.spi = ipsec_xform->spi;
2783 session->dir = DIR_ENC;
2784 if (ipsec_xform->tunnel.type ==
2785 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2786 encap_pdb.ip_hdr_len = sizeof(struct ip);
2787 ip4_hdr.ip_v = IPVERSION;
2789 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2790 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2793 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2794 ip4_hdr.ip_p = IPPROTO_ESP;
2796 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2797 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2798 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2799 &ip4_hdr, sizeof(struct ip));
2800 hdr = (uint8_t *)&ip4_hdr;
2801 } else if (ipsec_xform->tunnel.type ==
2802 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2803 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2804 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2805 ((ipsec_xform->tunnel.ipv6.dscp <<
2806 RTE_IPV6_HDR_TC_SHIFT) &
2807 RTE_IPV6_HDR_TC_MASK) |
2808 ((ipsec_xform->tunnel.ipv6.flabel <<
2809 RTE_IPV6_HDR_FL_SHIFT) &
2810 RTE_IPV6_HDR_FL_MASK));
2811 /* Payload length will be updated by HW */
2812 ip6_hdr.payload_len = 0;
2813 ip6_hdr.hop_limits =
2814 ipsec_xform->tunnel.ipv6.hlimit;
2815 ip6_hdr.proto = (ipsec_xform->proto ==
2816 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2817 IPPROTO_ESP : IPPROTO_AH;
2818 memcpy(&ip6_hdr.src_addr,
2819 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2820 memcpy(&ip6_hdr.dst_addr,
2821 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2822 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2823 hdr = (uint8_t *)&ip6_hdr;
2826 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2827 1, 0, SHR_SERIAL, &encap_pdb,
2828 hdr, &cipherdata, &authdata);
2829 } else if (ipsec_xform->direction ==
2830 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2831 flc->dhr = SEC_FLC_DHR_INBOUND;
2832 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2833 decap_pdb.options = (ipsec_xform->tunnel.type ==
2834 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2835 sizeof(struct ip) << 16 :
2836 sizeof(struct rte_ipv6_hdr) << 16;
2837 if (ipsec_xform->options.esn)
2838 decap_pdb.options |= PDBOPTS_ESP_ESN;
2839 session->dir = DIR_DEC;
2840 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2842 &decap_pdb, &cipherdata, &authdata);
2847 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2851 flc->word1_sdl = (uint8_t)bufsize;
2853 /* Enable the stashing control bit */
2854 DPAA2_SET_FLC_RSC(flc);
2855 flc->word2_rflc_31_0 = lower_32_bits(
2856 (size_t)&(((struct dpaa2_sec_qp *)
2857 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2858 flc->word3_rflc_63_32 = upper_32_bits(
2859 (size_t)&(((struct dpaa2_sec_qp *)
2860 dev->data->queue_pairs[0])->rx_vq));
2862 /* Set EWS bit i.e. enable write-safe */
2863 DPAA2_SET_FLC_EWS(flc);
2864 /* Set BS = 1 i.e. reuse input buffers as output buffers */
2865 DPAA2_SET_FLC_REUSE_BS(flc);
2866 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2867 DPAA2_SET_FLC_REUSE_FF(flc);
2869 session->ctxt = priv;
2873 rte_free(session->auth_key.data);
2874 rte_free(session->cipher_key.data);
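/*
 * Usage sketch (illustrative): an egress tunnel-mode IPsec SA description of
 * the shape dpaa2_sec_set_ipsec_session() above consumes. Field names follow
 * the public struct rte_security_session_conf of this DPDK era; SPI,
 * addresses and the crypto xform pointer are example assumptions.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,		// hypothetical
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = {
 *				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *				.ipv4 = {
 *					.src_ip.s_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)),
 *					.dst_ip.s_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2)),
 *					.ttl = 64,
 *				},
 *			},
 *		},
 *		.crypto_xform = &aead_xform,	// e.g. the GCM xform sketched earlier
 *	};
 *
 * The egress branch above serialises these fields into an ipsec_encap_pdb
 * plus a prebuilt outer IP header for cnstr_shdsc_ipsec_new_encap().
 */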
2880 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2881 struct rte_security_session_conf *conf,
2884 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2885 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2886 struct rte_crypto_auth_xform *auth_xform = NULL;
2887 struct rte_crypto_cipher_xform *cipher_xform;
2888 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2889 struct ctxt_priv *priv;
2890 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2891 struct alginfo authdata, cipherdata;
2892 struct alginfo *p_authdata = NULL;
2894 struct sec_flow_context *flc;
2895 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2901 PMD_INIT_FUNC_TRACE();
2903 memset(session, 0, sizeof(dpaa2_sec_session));
2905 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2906 sizeof(struct ctxt_priv) +
2907 sizeof(struct sec_flc_desc),
2908 RTE_CACHE_LINE_SIZE);
2911 DPAA2_SEC_ERR("No memory for priv CTXT");
2915 priv->fle_pool = dev_priv->fle_pool;
2916 flc = &priv->flc_desc[0].flc;
2918 /* find xform types */
2919 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2920 cipher_xform = &xform->cipher;
2921 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2922 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2923 session->ext_params.aead_ctxt.auth_cipher_text = true;
2924 cipher_xform = &xform->cipher;
2925 auth_xform = &xform->next->auth;
2926 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2927 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2928 session->ext_params.aead_ctxt.auth_cipher_text = false;
2929 cipher_xform = &xform->next->cipher;
2930 auth_xform = &xform->auth;
2932 DPAA2_SEC_ERR("Invalid crypto type");
2936 session->ctxt_type = DPAA2_SEC_PDCP;
2938 session->cipher_key.data = rte_zmalloc(NULL,
2939 cipher_xform->key.length,
2940 RTE_CACHE_LINE_SIZE);
2941 if (session->cipher_key.data == NULL &&
2942 cipher_xform->key.length > 0) {
2943 DPAA2_SEC_ERR("No Memory for cipher key");
2947 session->cipher_key.length = cipher_xform->key.length;
2948 memcpy(session->cipher_key.data, cipher_xform->key.data,
2949 cipher_xform->key.length);
2951 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2953 session->cipher_alg = cipher_xform->algo;
2955 session->cipher_key.data = NULL;
2956 session->cipher_key.length = 0;
2957 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2958 session->dir = DIR_ENC;
2961 session->pdcp.domain = pdcp_xform->domain;
2962 session->pdcp.bearer = pdcp_xform->bearer;
2963 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2964 session->pdcp.sn_size = pdcp_xform->sn_size;
2965 session->pdcp.hfn = pdcp_xform->hfn;
2966 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2967 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2968 /* hfn ovd (override) offset location is stored in the iv.offset value */
2969 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2971 cipherdata.key = (size_t)session->cipher_key.data;
2972 cipherdata.keylen = session->cipher_key.length;
2973 cipherdata.key_enc_flags = 0;
2974 cipherdata.key_type = RTA_DATA_IMM;
2976 switch (session->cipher_alg) {
2977 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2978 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2980 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2981 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2983 case RTE_CRYPTO_CIPHER_AES_CTR:
2984 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2986 case RTE_CRYPTO_CIPHER_NULL:
2987 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2990 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2991 session->cipher_alg);
2996 session->auth_key.data = rte_zmalloc(NULL,
2997 auth_xform->key.length,
2998 RTE_CACHE_LINE_SIZE);
2999 if (!session->auth_key.data &&
3000 auth_xform->key.length > 0) {
3001 DPAA2_SEC_ERR("No Memory for auth key");
3002 rte_free(session->cipher_key.data);
3006 session->auth_key.length = auth_xform->key.length;
3007 memcpy(session->auth_key.data, auth_xform->key.data,
3008 auth_xform->key.length);
3009 session->auth_alg = auth_xform->algo;
3011 session->auth_key.data = NULL;
3012 session->auth_key.length = 0;
3013 session->auth_alg = 0;
3015 authdata.key = (size_t)session->auth_key.data;
3016 authdata.keylen = session->auth_key.length;
3017 authdata.key_enc_flags = 0;
3018 authdata.key_type = RTA_DATA_IMM;
3020 if (session->auth_alg) {
3021 switch (session->auth_alg) {
3022 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3023 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3025 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3026 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3028 case RTE_CRYPTO_AUTH_AES_CMAC:
3029 authdata.algtype = PDCP_AUTH_TYPE_AES;
3031 case RTE_CRYPTO_AUTH_NULL:
3032 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3035 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3040 p_authdata = &authdata;
3041 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3042 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3046 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3047 if (session->dir == DIR_ENC)
3048 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3049 priv->flc_desc[0].desc, 1, swap,
3051 session->pdcp.sn_size,
3053 pdcp_xform->pkt_dir,
3054 pdcp_xform->hfn_threshold,
3055 &cipherdata, &authdata,
3057 else if (session->dir == DIR_DEC)
3058 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3059 priv->flc_desc[0].desc, 1, swap,
3061 session->pdcp.sn_size,
3063 pdcp_xform->pkt_dir,
3064 pdcp_xform->hfn_threshold,
3065 &cipherdata, &authdata,
3068 if (session->dir == DIR_ENC)
3069 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3070 priv->flc_desc[0].desc, 1, swap,
3071 session->pdcp.sn_size,
3074 pdcp_xform->pkt_dir,
3075 pdcp_xform->hfn_threshold,
3076 &cipherdata, p_authdata, 0);
3077 else if (session->dir == DIR_DEC)
3078 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3079 priv->flc_desc[0].desc, 1, swap,
3080 session->pdcp.sn_size,
3083 pdcp_xform->pkt_dir,
3084 pdcp_xform->hfn_threshold,
3085 &cipherdata, p_authdata, 0);
3089 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3093 /* Enable the stashing control bit */
3094 DPAA2_SET_FLC_RSC(flc);
3095 flc->word2_rflc_31_0 = lower_32_bits(
3096 (size_t)&(((struct dpaa2_sec_qp *)
3097 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3098 flc->word3_rflc_63_32 = upper_32_bits(
3099 (size_t)&(((struct dpaa2_sec_qp *)
3100 dev->data->queue_pairs[0])->rx_vq));
3102 flc->word1_sdl = (uint8_t)bufsize;
3104 /* TODO - check the performance impact or
3105 * align as per the descriptor type.
3106 * Set EWS bit i.e. enable write-safe:
3107 * DPAA2_SET_FLC_EWS(flc);
3110 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3111 DPAA2_SET_FLC_REUSE_BS(flc);
3112 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3113 DPAA2_SET_FLC_REUSE_FF(flc);
3115 session->ctxt = priv;
3119 rte_free(session->auth_key.data);
3120 rte_free(session->cipher_key.data);
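/*
 * Usage sketch (illustrative): a PDCP control-plane session description of
 * the shape dpaa2_sec_set_pdcp_session() above consumes. Only the fields the
 * function actually reads are shown; the values are example assumptions.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *			.bearer = 0x3,			// hypothetical
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *			.hfn = 0,
 *			.hfn_threshold = 0x70c0a,	// hypothetical
 *			.hfn_ovrd = 0,
 *		},
 *		.crypto_xform = &cipher_auth_chain,	// cipher + optional auth xforms
 *	};
 *
 * Control-plane sessions require an integrity algorithm, which is why the
 * function errors out above when auth is absent for PDCP_MODE_CONTROL.
 */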
3126 dpaa2_sec_security_session_create(void *dev,
3127 struct rte_security_session_conf *conf,
3128 struct rte_security_session *sess,
3129 struct rte_mempool *mempool)
3131 void *sess_private_data;
3132 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3135 if (rte_mempool_get(mempool, &sess_private_data)) {
3136 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3140 switch (conf->protocol) {
3141 case RTE_SECURITY_PROTOCOL_IPSEC:
3142 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3145 case RTE_SECURITY_PROTOCOL_MACSEC:
3147 case RTE_SECURITY_PROTOCOL_PDCP:
3148 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3155 DPAA2_SEC_ERR("Failed to configure session parameters");
3156 /* Return session to mempool */
3157 rte_mempool_put(mempool, sess_private_data);
3161 set_sec_session_private_data(sess, sess_private_data);
3166 /** Clear the memory of session so it doesn't leave key material behind */
3168 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3169 struct rte_security_session *sess)
3171 PMD_INIT_FUNC_TRACE();
3172 void *sess_priv = get_sec_session_private_data(sess);
3174 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3177 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3180 rte_free(s->cipher_key.data);
3181 rte_free(s->auth_key.data);
3182 memset(s, 0, sizeof(dpaa2_sec_session));
3183 set_sec_session_private_data(sess, NULL);
3184 rte_mempool_put(sess_mp, sess_priv);
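/*
 * Usage sketch (illustrative): creating a protocol-offload session against
 * this PMD through the public rte_security API of this DPDK era; dev_id and
 * sess_mp are assumed application values.
 *
 *	struct rte_security_ctx *ctx =
 *		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp);
 *
 *	if (sec_sess == NULL)
 *		rte_exit(EXIT_FAILURE, "security session creation failed\n");
 *
 * rte_security_session_create() dispatches to
 * dpaa2_sec_security_session_create() above via dpaa2_sec_security_ops,
 * defined later in this file.
 */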
3190 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3191 struct rte_crypto_sym_xform *xform,
3192 struct rte_cryptodev_sym_session *sess,
3193 struct rte_mempool *mempool)
3195 void *sess_private_data;
3198 if (rte_mempool_get(mempool, &sess_private_data)) {
3199 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3203 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3205 DPAA2_SEC_ERR("Failed to configure session parameters");
3206 /* Return session to mempool */
3207 rte_mempool_put(mempool, sess_private_data);
3211 set_sym_session_private_data(sess, dev->driver_id,
3217 /** Clear the memory of session so it doesn't leave key material behind */
3219 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3220 struct rte_cryptodev_sym_session *sess)
3222 PMD_INIT_FUNC_TRACE();
3223 uint8_t index = dev->driver_id;
3224 void *sess_priv = get_sym_session_private_data(sess, index);
3225 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3229 rte_free(s->cipher_key.data);
3230 rte_free(s->auth_key.data);
3231 memset(s, 0, sizeof(dpaa2_sec_session));
3232 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3233 set_sym_session_private_data(sess, index, NULL);
3234 rte_mempool_put(sess_mp, sess_priv);
3239 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3240 struct rte_cryptodev_config *config __rte_unused)
3242 PMD_INIT_FUNC_TRACE();
3248 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3250 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3251 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3252 struct dpseci_attr attr;
3253 struct dpaa2_queue *dpaa2_q;
3254 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3255 dev->data->queue_pairs;
3256 struct dpseci_rx_queue_attr rx_attr;
3257 struct dpseci_tx_queue_attr tx_attr;
3260 PMD_INIT_FUNC_TRACE();
3262 memset(&attr, 0, sizeof(struct dpseci_attr));
3264 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3266 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3268 goto get_attr_failure;
3270 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3272 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3273 goto get_attr_failure;
3275 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3276 dpaa2_q = &qp[i]->rx_vq;
3277 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3279 dpaa2_q->fqid = rx_attr.fqid;
3280 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3282 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3283 dpaa2_q = &qp[i]->tx_vq;
3284 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3286 dpaa2_q->fqid = tx_attr.fqid;
3287 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3292 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3297 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3299 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3300 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3303 PMD_INIT_FUNC_TRACE();
3305 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3307 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3312 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3314 DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %x", ret);
3320 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3322 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3323 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3326 PMD_INIT_FUNC_TRACE();
3328 /* This function is the reverse of dpaa2_sec_dev_init.
3329 * It does the following:
3330 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3331 * 2. Close the DPSECI device
3332 * 3. Free the allocated resources.
3335 /* Close the device at the underlying layer */
3336 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3338 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3342 /* Free the allocated memory for crypto private data and dpseci */
3350 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3351 struct rte_cryptodev_info *info)
3353 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3355 PMD_INIT_FUNC_TRACE();
3357 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3358 info->feature_flags = dev->feature_flags;
3359 info->capabilities = dpaa2_sec_capabilities;
3360 /* No limit on the number of sessions */
3361 info->sym.max_nb_sessions = 0;
3362 info->driver_id = cryptodev_driver_id;
3367 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3368 struct rte_cryptodev_stats *stats)
3370 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3371 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3372 struct dpseci_sec_counters counters = {0};
3373 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3374 dev->data->queue_pairs;
3377 PMD_INIT_FUNC_TRACE();
3378 if (stats == NULL) {
3379 DPAA2_SEC_ERR("Invalid stats ptr (NULL)");
3382 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3383 if (qp[i] == NULL) {
3384 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3388 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3389 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3390 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3391 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3394 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3397 DPAA2_SEC_ERR("Reading SEC counters failed");
3399 DPAA2_SEC_INFO("dpseci hardware stats:"
3400 "\n\tNum of Requests Dequeued = %" PRIu64
3401 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3402 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3403 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3404 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3405 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3406 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3407 counters.dequeued_requests,
3408 counters.ob_enc_requests,
3409 counters.ib_dec_requests,
3410 counters.ob_enc_bytes,
3411 counters.ob_prot_bytes,
3412 counters.ib_dec_bytes,
3413 counters.ib_valid_bytes);
3418 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3421 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3422 (dev->data->queue_pairs);
3424 PMD_INIT_FUNC_TRACE();
3426 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3427 if (qp[i] == NULL) {
3428 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3431 qp[i]->tx_vq.rx_pkts = 0;
3432 qp[i]->tx_vq.tx_pkts = 0;
3433 qp[i]->tx_vq.err_pkts = 0;
3434 qp[i]->rx_vq.rx_pkts = 0;
3435 qp[i]->rx_vq.tx_pkts = 0;
3436 qp[i]->rx_vq.err_pkts = 0;
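/*
 * Usage sketch (illustrative): the two handlers above back the generic
 * cryptodev stats calls, so an application polls and clears the counters as
 * follows (dev_id is an assumed application value):
 *
 *	struct rte_cryptodev_stats stats;
 *
 *	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 "\n",
 *		       stats.enqueued_count, stats.dequeued_count);
 *	rte_cryptodev_stats_reset(dev_id);
 */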
3440 static void __attribute__((hot))
3441 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3442 const struct qbman_fd *fd,
3443 const struct qbman_result *dq,
3444 struct dpaa2_queue *rxq,
3445 struct rte_event *ev)
3447 /* Prefetching mbuf */
3448 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3449 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3451 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3452 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3454 ev->flow_id = rxq->ev.flow_id;
3455 ev->sub_event_type = rxq->ev.sub_event_type;
3456 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3457 ev->op = RTE_EVENT_OP_NEW;
3458 ev->sched_type = rxq->ev.sched_type;
3459 ev->queue_id = rxq->ev.queue_id;
3460 ev->priority = rxq->ev.priority;
3461 ev->event_ptr = sec_fd_to_mbuf(fd);
3463 qbman_swp_dqrr_consume(swp, dq);
3466 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3467 const struct qbman_fd *fd,
3468 const struct qbman_result *dq,
3469 struct dpaa2_queue *rxq,
3470 struct rte_event *ev)
3473 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3474 /* Prefetching mbuf */
3475 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3476 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3478 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3479 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3481 ev->flow_id = rxq->ev.flow_id;
3482 ev->sub_event_type = rxq->ev.sub_event_type;
3483 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3484 ev->op = RTE_EVENT_OP_NEW;
3485 ev->sched_type = rxq->ev.sched_type;
3486 ev->queue_id = rxq->ev.queue_id;
3487 ev->priority = rxq->ev.priority;
3489 ev->event_ptr = sec_fd_to_mbuf(fd);
3490 dqrr_index = qbman_get_dqrr_idx(dq);
3491 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3492 DPAA2_PER_LCORE_DQRR_SIZE++;
3493 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3494 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3498 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3500 struct dpaa2_dpcon_dev *dpcon,
3501 const struct rte_event *event)
3503 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3504 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3505 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3506 struct dpseci_rx_queue_cfg cfg;
3510 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3511 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3512 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3513 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3517 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3518 (dpcon->num_priorities - 1);
3520 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3521 cfg.options = DPSECI_QUEUE_OPT_DEST;
3522 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3523 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3524 cfg.dest_cfg.priority = priority;
3526 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3527 cfg.user_ctx = (size_t)(qp);
3528 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3529 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3530 cfg.order_preservation_en = 1;
3532 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3535 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3539 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3545 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3548 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3549 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3550 struct dpseci_rx_queue_cfg cfg;
3553 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3554 cfg.options = DPSECI_QUEUE_OPT_DEST;
3555 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3557 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3560 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
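/*
 * Usage sketch (illustrative, assuming the rte_event_crypto_adapter API of
 * this DPDK era): the attach/detach hooks above are reached when an
 * application binds a SEC queue pair to an event device. adapter_id,
 * evdev_id, cdev_id and port_conf are assumed application values.
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,	// selects the atomic callback above
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *
 *	rte_event_crypto_adapter_create(adapter_id, evdev_id, &port_conf,
 *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *	rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
 *						0, &ev);	// qp_id = 0
 */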
3565 static struct rte_cryptodev_ops crypto_ops = {
3566 .dev_configure = dpaa2_sec_dev_configure,
3567 .dev_start = dpaa2_sec_dev_start,
3568 .dev_stop = dpaa2_sec_dev_stop,
3569 .dev_close = dpaa2_sec_dev_close,
3570 .dev_infos_get = dpaa2_sec_dev_infos_get,
3571 .stats_get = dpaa2_sec_stats_get,
3572 .stats_reset = dpaa2_sec_stats_reset,
3573 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3574 .queue_pair_release = dpaa2_sec_queue_pair_release,
3575 .queue_pair_count = dpaa2_sec_queue_pair_count,
3576 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3577 .sym_session_configure = dpaa2_sec_sym_session_configure,
3578 .sym_session_clear = dpaa2_sec_sym_session_clear,
3581 #ifdef RTE_LIBRTE_SECURITY
3582 static const struct rte_security_capability *
3583 dpaa2_sec_capabilities_get(void *device __rte_unused)
3585 return dpaa2_sec_security_cap;
3588 static const struct rte_security_ops dpaa2_sec_security_ops = {
3589 .session_create = dpaa2_sec_security_session_create,
3590 .session_update = NULL,
3591 .session_stats_get = NULL,
3592 .session_destroy = dpaa2_sec_security_session_destroy,
3593 .set_pkt_metadata = NULL,
3594 .capabilities_get = dpaa2_sec_capabilities_get
3599 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3601 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3603 rte_free(dev->security_ctx);
3605 rte_mempool_free(internals->fle_pool);
3607 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3608 dev->data->name, rte_socket_id());
3614 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3616 struct dpaa2_sec_dev_private *internals;
3617 struct rte_device *dev = cryptodev->device;
3618 struct rte_dpaa2_device *dpaa2_dev;
3619 #ifdef RTE_LIBRTE_SECURITY
3620 struct rte_security_ctx *security_instance;
3622 struct fsl_mc_io *dpseci;
3624 struct dpseci_attr attr;
3628 PMD_INIT_FUNC_TRACE();
3629 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3630 if (dpaa2_dev == NULL) {
3631 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3634 hw_id = dpaa2_dev->object_id;
3636 cryptodev->driver_id = cryptodev_driver_id;
3637 cryptodev->dev_ops = &crypto_ops;
3639 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3640 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3641 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3642 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3643 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3644 RTE_CRYPTODEV_FF_SECURITY |
3645 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3646 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3647 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3648 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3649 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3651 internals = cryptodev->data->dev_private;
3654 * For secondary processes, we don't initialise any further as primary
3655 * has already done this work. Only check we don't need a different
3658 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3659 DPAA2_SEC_DEBUG("Device already init by primary process");
3662 #ifdef RTE_LIBRTE_SECURITY
3663 /* Initialize security_ctx only for primary process*/
3664 security_instance = rte_malloc("rte_security_instances_ops",
3665 sizeof(struct rte_security_ctx), 0);
3666 if (security_instance == NULL)
3668 security_instance->device = (void *)cryptodev;
3669 security_instance->ops = &dpaa2_sec_security_ops;
3670 security_instance->sess_cnt = 0;
3671 cryptodev->security_ctx = security_instance;
3673 /* Open the rte device via MC and save the handle for further use */
3674 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3675 sizeof(struct fsl_mc_io), 0);
3678 "Error in allocating the memory for dpsec object");
3681 dpseci->regs = rte_mcp_ptr_list[0];
3683 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3685 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3689 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3692 "Cannot get dpsec device attributed: Error = %x",
3696 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3699 internals->max_nb_queue_pairs = attr.num_tx_queues;
3700 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3701 internals->hw = dpseci;
3702 internals->token = token;
3704 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3705 getpid(), cryptodev->data->dev_id);
3706 internals->fle_pool = rte_mempool_create((const char *)str,
3709 FLE_POOL_CACHE_SIZE, 0,
3710 NULL, NULL, NULL, NULL,
3712 if (!internals->fle_pool) {
3713 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3717 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3721 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3723 /* dpaa2_sec_uninit(crypto_dev_name); */
3728 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3729 struct rte_dpaa2_device *dpaa2_dev)
3731 struct rte_cryptodev *cryptodev;
3732 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3736 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3737 dpaa2_dev->object_id);
3739 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3740 if (cryptodev == NULL)
3743 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3744 cryptodev->data->dev_private = rte_zmalloc_socket(
3745 "cryptodev private structure",
3746 sizeof(struct dpaa2_sec_dev_private),
3747 RTE_CACHE_LINE_SIZE,
3750 if (cryptodev->data->dev_private == NULL)
3751 rte_panic("Cannot allocate memzone for private "
3755 dpaa2_dev->cryptodev = cryptodev;
3756 cryptodev->device = &dpaa2_dev->device;
3758 /* init user callbacks */
3759 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3761 if (dpaa2_svr_family == SVR_LX2160A)
3762 rta_set_sec_era(RTA_SEC_ERA_10);
3764 DPAA2_SEC_INFO("DPAA2 SEC ERA is %d", rta_get_sec_era());
3766 /* Invoke PMD device initialization function */
3767 retval = dpaa2_sec_dev_init(cryptodev);
3771 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3772 rte_free(cryptodev->data->dev_private);
3774 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3780 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3782 struct rte_cryptodev *cryptodev;
3785 cryptodev = dpaa2_dev->cryptodev;
3786 if (cryptodev == NULL)
3789 ret = dpaa2_sec_uninit(cryptodev);
3793 return rte_cryptodev_pmd_destroy(cryptodev);
3796 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3797 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3798 .drv_type = DPAA2_CRYPTO,
3800 .name = "DPAA2 SEC PMD"
3802 .probe = cryptodev_dpaa2_sec_probe,
3803 .remove = cryptodev_dpaa2_sec_remove,
3806 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3808 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3809 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3810 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3812 RTE_INIT(dpaa2_sec_init_log)
3814 /* Crypto PMD level logs */
3815 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3816 if (dpaa2_logtype_sec >= 0)
3817 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
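/*
 * Note: with the logtype registered as "pmd.crypto.dpaa2" above and the
 * default level set to NOTICE, debug traces such as the rx_fqid/tx_fqid
 * prints can be enabled at runtime with an EAL argument along the lines of
 * --log-level=pmd.crypto.dpaa2,8 (exact syntax per the EAL logging
 * documentation for this DPDK version).
 */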