1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2018 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
37 typedef uint64_t dma_addr_t;
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
44 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
45 * a pointer to the shared descriptor
47 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID 0x1957
49 #define FSL_DEVICE_ID 0x410
50 #define FSL_SUBSYSTEM_SEC 1
51 #define FSL_MC_DPSECI_DEVID 3
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS 32000
56 #define FLE_POOL_BUF_SIZE 256
57 #define FLE_POOL_CACHE_SIZE 512
58 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32))
59 #define SEC_FLC_DHR_OUTBOUND -114
60 #define SEC_FLC_DHR_INBOUND 0
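/* SEC engine era assumed by the RTA (runtime assembler) when constructing
 * shared descriptors.
 */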
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
64 static uint8_t cryptodev_driver_id;
66 int dpaa2_logtype_sec;
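/* Build a compound FD (frame list) using scatter/gather FLEs for protocol
 * offload (IPsec/PDCP) sessions when the source/destination mbufs are
 * segmented.
 */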
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70 struct rte_crypto_op *op,
71 struct qbman_fd *fd, uint16_t bpid)
73 struct rte_crypto_sym_op *sym_op = op->sym;
74 struct ctxt_priv *priv = sess->ctxt;
75 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76 struct sec_flow_context *flc;
77 struct rte_mbuf *mbuf;
78 uint32_t in_len = 0, out_len = 0;
85 /* first FLE entry used to store mbuf and session ctxt */
86 fle = (struct qbman_fle *)rte_malloc(NULL,
87 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
90 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
93 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
94 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
97 /* Save the shared descriptor */
98 flc = &priv->flc_desc[0].flc;
104 if (likely(bpid < MAX_BPID)) {
105 DPAA2_SET_FD_BPID(fd, bpid);
106 DPAA2_SET_FLE_BPID(op_fle, bpid);
107 DPAA2_SET_FLE_BPID(ip_fle, bpid);
109 DPAA2_SET_FD_IVP(fd);
110 DPAA2_SET_FLE_IVP(op_fle);
111 DPAA2_SET_FLE_IVP(ip_fle);
114 /* Configure FD as a FRAME LIST */
115 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116 DPAA2_SET_FD_COMPOUND_FMT(fd);
117 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
119 /* Configure Output FLE with Scatter/Gather Entry */
120 DPAA2_SET_FLE_SG_EXT(op_fle);
121 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
123 /* Configure Output SGE for Encap/Decap */
124 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
128 sge->length = mbuf->data_len;
129 out_len += sge->length;
132 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
135 /* use buf_len for the last buffer so that extra data can be appended */
136 sge->length = mbuf->buf_len - mbuf->data_off;
137 out_len += sge->length;
139 DPAA2_SET_FLE_FIN(sge);
140 op_fle->length = out_len;
143 mbuf = sym_op->m_src;
145 /* Configure Input FLE with Scatter/Gather Entry */
146 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147 DPAA2_SET_FLE_SG_EXT(ip_fle);
148 DPAA2_SET_FLE_FIN(ip_fle);
150 /* Configure input SGE for Encap/Decap */
151 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153 sge->length = mbuf->data_len;
154 in_len += sge->length;
160 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162 sge->length = mbuf->data_len;
163 in_len += sge->length;
166 ip_fle->length = in_len;
167 DPAA2_SET_FLE_FIN(sge);
169 /* In case of PDCP, per packet HFN is stored in
170 * mbuf priv after sym_op.
172 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
174 /* enable HFN override */
175 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
176 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
177 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
179 DPAA2_SET_FD_LEN(fd, ip_fle->length);
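/* Build a compound FD for protocol offload (IPsec/PDCP) sessions with
 * contiguous source and destination mbufs.
 */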
185 build_proto_compound_fd(dpaa2_sec_session *sess,
186 struct rte_crypto_op *op,
187 struct qbman_fd *fd, uint16_t bpid)
189 struct rte_crypto_sym_op *sym_op = op->sym;
190 struct ctxt_priv *priv = sess->ctxt;
191 struct qbman_fle *fle, *ip_fle, *op_fle;
192 struct sec_flow_context *flc;
193 struct rte_mbuf *src_mbuf = sym_op->m_src;
194 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
200 /* Save the shared descriptor */
201 flc = &priv->flc_desc[0].flc;
203 /* we are using the first FLE entry to store Mbuf */
204 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
206 DPAA2_SEC_DP_ERR("Memory alloc failed");
209 memset(fle, 0, FLE_POOL_BUF_SIZE);
210 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
211 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
216 if (likely(bpid < MAX_BPID)) {
217 DPAA2_SET_FD_BPID(fd, bpid);
218 DPAA2_SET_FLE_BPID(op_fle, bpid);
219 DPAA2_SET_FLE_BPID(ip_fle, bpid);
221 DPAA2_SET_FD_IVP(fd);
222 DPAA2_SET_FLE_IVP(op_fle);
223 DPAA2_SET_FLE_IVP(ip_fle);
226 /* Configure FD as a FRAME LIST */
227 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
228 DPAA2_SET_FD_COMPOUND_FMT(fd);
229 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
231 /* Configure Output FLE with dst mbuf data */
232 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
233 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
234 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
236 /* Configure Input FLE with src mbuf data */
237 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
238 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
239 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
241 DPAA2_SET_FD_LEN(fd, ip_fle->length);
242 DPAA2_SET_FLE_FIN(ip_fle);
244 /* In case of PDCP, per packet HFN is stored in
245 * mbuf priv after sym_op.
247 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
248 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
249 /* enable HFN override */
250 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
251 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
252 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
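/* Build a simple (single buffer) FD for protocol offload sessions; this path
 * falls back to the compound FD builder where a compound frame is required.
 */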
260 build_proto_fd(dpaa2_sec_session *sess,
261 struct rte_crypto_op *op,
262 struct qbman_fd *fd, uint16_t bpid)
264 struct rte_crypto_sym_op *sym_op = op->sym;
266 return build_proto_compound_fd(sess, op, fd, bpid);
268 struct ctxt_priv *priv = sess->ctxt;
269 struct sec_flow_context *flc;
270 struct rte_mbuf *mbuf = sym_op->m_src;
272 if (likely(bpid < MAX_BPID))
273 DPAA2_SET_FD_BPID(fd, bpid);
275 DPAA2_SET_FD_IVP(fd);
277 /* Save the shared descriptor */
278 flc = &priv->flc_desc[0].flc;
280 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
281 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
282 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
283 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
285 /* save physical address of mbuf */
286 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
287 mbuf->buf_iova = (size_t)op;
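/* Build a compound FD with scatter/gather FLEs for AEAD (AES-GCM) sessions
 * on segmented mbufs.
 */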
293 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
294 struct rte_crypto_op *op,
295 struct qbman_fd *fd, __rte_unused uint16_t bpid)
297 struct rte_crypto_sym_op *sym_op = op->sym;
298 struct ctxt_priv *priv = sess->ctxt;
299 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
300 struct sec_flow_context *flc;
301 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
302 int icv_len = sess->digest_length;
304 struct rte_mbuf *mbuf;
305 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
309 mbuf = sym_op->m_dst;
311 mbuf = sym_op->m_src;
313 /* first FLE entry used to store mbuf and session ctxt */
314 fle = (struct qbman_fle *)rte_malloc(NULL,
315 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
316 RTE_CACHE_LINE_SIZE);
317 if (unlikely(!fle)) {
318 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
321 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
322 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
323 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
329 /* Save the shared descriptor */
330 flc = &priv->flc_desc[0].flc;
332 /* Configure FD as a FRAME LIST */
333 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
334 DPAA2_SET_FD_COMPOUND_FMT(fd);
335 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
337 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
338 "iv-len=%d data_off: 0x%x\n",
339 sym_op->aead.data.offset,
340 sym_op->aead.data.length,
343 sym_op->m_src->data_off);
345 /* Configure Output FLE with Scatter/Gather Entry */
346 DPAA2_SET_FLE_SG_EXT(op_fle);
347 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
350 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
352 op_fle->length = (sess->dir == DIR_ENC) ?
353 (sym_op->aead.data.length + icv_len) :
354 sym_op->aead.data.length;
356 /* Configure Output SGE for Encap/Decap */
357 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
358 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
359 sge->length = mbuf->data_len - sym_op->aead.data.offset;
365 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
366 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
367 sge->length = mbuf->data_len;
370 sge->length -= icv_len;
372 if (sess->dir == DIR_ENC) {
374 DPAA2_SET_FLE_ADDR(sge,
375 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
376 sge->length = icv_len;
378 DPAA2_SET_FLE_FIN(sge);
381 mbuf = sym_op->m_src;
383 /* Configure Input FLE with Scatter/Gather Entry */
384 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
385 DPAA2_SET_FLE_SG_EXT(ip_fle);
386 DPAA2_SET_FLE_FIN(ip_fle);
387 ip_fle->length = (sess->dir == DIR_ENC) ?
388 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
389 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
392 /* Configure Input SGE for Encap/Decap */
393 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
394 sge->length = sess->iv.length;
398 DPAA2_SET_FLE_ADDR(sge,
399 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
400 sge->length = auth_only_len;
404 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
405 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
407 sge->length = mbuf->data_len - sym_op->aead.data.offset;
413 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
414 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
415 sge->length = mbuf->data_len;
419 if (sess->dir == DIR_DEC) {
421 old_icv = (uint8_t *)(sge + 1);
422 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
423 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
424 sge->length = icv_len;
427 DPAA2_SET_FLE_FIN(sge);
429 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
430 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
432 DPAA2_SET_FD_LEN(fd, ip_fle->length);
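/* Build a compound FD for AEAD (AES-GCM) sessions on contiguous mbufs. */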
438 build_authenc_gcm_fd(dpaa2_sec_session *sess,
439 struct rte_crypto_op *op,
440 struct qbman_fd *fd, uint16_t bpid)
442 struct rte_crypto_sym_op *sym_op = op->sym;
443 struct ctxt_priv *priv = sess->ctxt;
444 struct qbman_fle *fle, *sge;
445 struct sec_flow_context *flc;
446 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
447 int icv_len = sess->digest_length, retval;
449 struct rte_mbuf *dst;
450 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
458 /* TODO we are using the first FLE entry to store the mbuf and session ctxt.
459 * Currently we do not know which FLE has the mbuf stored,
460 * so while retrieving we can go back 1 FLE from the FD ADDR
461 * to get the mbuf address from the previous FLE.
462 * A better approach would be to use the inline mbuf
464 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
466 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
469 memset(fle, 0, FLE_POOL_BUF_SIZE);
470 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
471 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
474 if (likely(bpid < MAX_BPID)) {
475 DPAA2_SET_FD_BPID(fd, bpid);
476 DPAA2_SET_FLE_BPID(fle, bpid);
477 DPAA2_SET_FLE_BPID(fle + 1, bpid);
478 DPAA2_SET_FLE_BPID(sge, bpid);
479 DPAA2_SET_FLE_BPID(sge + 1, bpid);
480 DPAA2_SET_FLE_BPID(sge + 2, bpid);
481 DPAA2_SET_FLE_BPID(sge + 3, bpid);
483 DPAA2_SET_FD_IVP(fd);
484 DPAA2_SET_FLE_IVP(fle);
485 DPAA2_SET_FLE_IVP((fle + 1));
486 DPAA2_SET_FLE_IVP(sge);
487 DPAA2_SET_FLE_IVP((sge + 1));
488 DPAA2_SET_FLE_IVP((sge + 2));
489 DPAA2_SET_FLE_IVP((sge + 3));
492 /* Save the shared descriptor */
493 flc = &priv->flc_desc[0].flc;
494 /* Configure FD as a FRAME LIST */
495 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
496 DPAA2_SET_FD_COMPOUND_FMT(fd);
497 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
499 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
500 "iv-len=%d data_off: 0x%x\n",
501 sym_op->aead.data.offset,
502 sym_op->aead.data.length,
505 sym_op->m_src->data_off);
507 /* Configure Output FLE with Scatter/Gather Entry */
508 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
510 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
511 fle->length = (sess->dir == DIR_ENC) ?
512 (sym_op->aead.data.length + icv_len) :
513 sym_op->aead.data.length;
515 DPAA2_SET_FLE_SG_EXT(fle);
517 /* Configure Output SGE for Encap/Decap */
518 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
519 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
520 sge->length = sym_op->aead.data.length;
522 if (sess->dir == DIR_ENC) {
524 DPAA2_SET_FLE_ADDR(sge,
525 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
526 sge->length = sess->digest_length;
528 DPAA2_SET_FLE_FIN(sge);
533 /* Configure Input FLE with Scatter/Gather Entry */
534 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
535 DPAA2_SET_FLE_SG_EXT(fle);
536 DPAA2_SET_FLE_FIN(fle);
537 fle->length = (sess->dir == DIR_ENC) ?
538 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
539 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
540 sess->digest_length);
542 /* Configure Input SGE for Encap/Decap */
543 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
544 sge->length = sess->iv.length;
547 DPAA2_SET_FLE_ADDR(sge,
548 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
549 sge->length = auth_only_len;
550 DPAA2_SET_FLE_BPID(sge, bpid);
554 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
555 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
556 sym_op->m_src->data_off);
557 sge->length = sym_op->aead.data.length;
558 if (sess->dir == DIR_DEC) {
560 old_icv = (uint8_t *)(sge + 1);
561 memcpy(old_icv, sym_op->aead.digest.data,
562 sess->digest_length);
563 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
564 sge->length = sess->digest_length;
566 DPAA2_SET_FLE_FIN(sge);
569 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
570 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
573 DPAA2_SET_FD_LEN(fd, fle->length);
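/* Build a compound FD with scatter/gather FLEs for chained cipher+auth
 * (authenc) sessions on segmented mbufs.
 */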
578 build_authenc_sg_fd(dpaa2_sec_session *sess,
579 struct rte_crypto_op *op,
580 struct qbman_fd *fd, __rte_unused uint16_t bpid)
582 struct rte_crypto_sym_op *sym_op = op->sym;
583 struct ctxt_priv *priv = sess->ctxt;
584 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
585 struct sec_flow_context *flc;
586 uint32_t auth_only_len = sym_op->auth.data.length -
587 sym_op->cipher.data.length;
588 int icv_len = sess->digest_length;
590 struct rte_mbuf *mbuf;
591 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
595 mbuf = sym_op->m_dst;
597 mbuf = sym_op->m_src;
599 /* first FLE entry used to store mbuf and session ctxt */
600 fle = (struct qbman_fle *)rte_malloc(NULL,
601 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
602 RTE_CACHE_LINE_SIZE);
603 if (unlikely(!fle)) {
604 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
607 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
608 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
609 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
615 /* Save the shared descriptor */
616 flc = &priv->flc_desc[0].flc;
618 /* Configure FD as a FRAME LIST */
619 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
620 DPAA2_SET_FD_COMPOUND_FMT(fd);
621 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
624 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
625 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
626 sym_op->auth.data.offset,
627 sym_op->auth.data.length,
629 sym_op->cipher.data.offset,
630 sym_op->cipher.data.length,
632 sym_op->m_src->data_off);
634 /* Configure Output FLE with Scatter/Gather Entry */
635 DPAA2_SET_FLE_SG_EXT(op_fle);
636 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
639 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
641 op_fle->length = (sess->dir == DIR_ENC) ?
642 (sym_op->cipher.data.length + icv_len) :
643 sym_op->cipher.data.length;
645 /* Configure Output SGE for Encap/Decap */
646 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
647 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
648 sge->length = mbuf->data_len - sym_op->auth.data.offset;
654 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
655 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
656 sge->length = mbuf->data_len;
659 sge->length -= icv_len;
661 if (sess->dir == DIR_ENC) {
663 DPAA2_SET_FLE_ADDR(sge,
664 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
665 sge->length = icv_len;
667 DPAA2_SET_FLE_FIN(sge);
670 mbuf = sym_op->m_src;
672 /* Configure Input FLE with Scatter/Gather Entry */
673 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
674 DPAA2_SET_FLE_SG_EXT(ip_fle);
675 DPAA2_SET_FLE_FIN(ip_fle);
676 ip_fle->length = (sess->dir == DIR_ENC) ?
677 (sym_op->auth.data.length + sess->iv.length) :
678 (sym_op->auth.data.length + sess->iv.length +
681 /* Configure Input SGE for Encap/Decap */
682 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
683 sge->length = sess->iv.length;
686 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
687 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
689 sge->length = mbuf->data_len - sym_op->auth.data.offset;
695 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
696 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
697 sge->length = mbuf->data_len;
700 sge->length -= icv_len;
702 if (sess->dir == DIR_DEC) {
704 old_icv = (uint8_t *)(sge + 1);
705 memcpy(old_icv, sym_op->auth.digest.data,
707 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
708 sge->length = icv_len;
711 DPAA2_SET_FLE_FIN(sge);
713 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
714 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
716 DPAA2_SET_FD_LEN(fd, ip_fle->length);
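/* Build a compound FD for chained cipher+auth (authenc) sessions on
 * contiguous mbufs.
 */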
722 build_authenc_fd(dpaa2_sec_session *sess,
723 struct rte_crypto_op *op,
724 struct qbman_fd *fd, uint16_t bpid)
726 struct rte_crypto_sym_op *sym_op = op->sym;
727 struct ctxt_priv *priv = sess->ctxt;
728 struct qbman_fle *fle, *sge;
729 struct sec_flow_context *flc;
730 uint32_t auth_only_len = sym_op->auth.data.length -
731 sym_op->cipher.data.length;
732 int icv_len = sess->digest_length, retval;
734 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
736 struct rte_mbuf *dst;
743 /* we are using the first FLE entry to store the mbuf.
744 * Currently we do not know which FLE has the mbuf stored,
745 * so while retrieving we can go back 1 FLE from the FD ADDR
746 * to get the mbuf address from the previous FLE.
747 * A better approach would be to use the inline mbuf
749 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
751 DPAA2_SEC_ERR("Memory alloc failed for SGE");
754 memset(fle, 0, FLE_POOL_BUF_SIZE);
755 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
756 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
759 if (likely(bpid < MAX_BPID)) {
760 DPAA2_SET_FD_BPID(fd, bpid);
761 DPAA2_SET_FLE_BPID(fle, bpid);
762 DPAA2_SET_FLE_BPID(fle + 1, bpid);
763 DPAA2_SET_FLE_BPID(sge, bpid);
764 DPAA2_SET_FLE_BPID(sge + 1, bpid);
765 DPAA2_SET_FLE_BPID(sge + 2, bpid);
766 DPAA2_SET_FLE_BPID(sge + 3, bpid);
768 DPAA2_SET_FD_IVP(fd);
769 DPAA2_SET_FLE_IVP(fle);
770 DPAA2_SET_FLE_IVP((fle + 1));
771 DPAA2_SET_FLE_IVP(sge);
772 DPAA2_SET_FLE_IVP((sge + 1));
773 DPAA2_SET_FLE_IVP((sge + 2));
774 DPAA2_SET_FLE_IVP((sge + 3));
777 /* Save the shared descriptor */
778 flc = &priv->flc_desc[0].flc;
779 /* Configure FD as a FRAME LIST */
780 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
781 DPAA2_SET_FD_COMPOUND_FMT(fd);
782 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
785 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
786 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
787 sym_op->auth.data.offset,
788 sym_op->auth.data.length,
790 sym_op->cipher.data.offset,
791 sym_op->cipher.data.length,
793 sym_op->m_src->data_off);
795 /* Configure Output FLE with Scatter/Gather Entry */
796 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
798 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
799 fle->length = (sess->dir == DIR_ENC) ?
800 (sym_op->cipher.data.length + icv_len) :
801 sym_op->cipher.data.length;
803 DPAA2_SET_FLE_SG_EXT(fle);
805 /* Configure Output SGE for Encap/Decap */
806 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
807 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
809 sge->length = sym_op->cipher.data.length;
811 if (sess->dir == DIR_ENC) {
813 DPAA2_SET_FLE_ADDR(sge,
814 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
815 sge->length = sess->digest_length;
816 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
819 DPAA2_SET_FLE_FIN(sge);
824 /* Configure Input FLE with Scatter/Gather Entry */
825 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
826 DPAA2_SET_FLE_SG_EXT(fle);
827 DPAA2_SET_FLE_FIN(fle);
828 fle->length = (sess->dir == DIR_ENC) ?
829 (sym_op->auth.data.length + sess->iv.length) :
830 (sym_op->auth.data.length + sess->iv.length +
831 sess->digest_length);
833 /* Configure Input SGE for Encap/Decap */
834 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
835 sge->length = sess->iv.length;
838 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
839 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
840 sym_op->m_src->data_off);
841 sge->length = sym_op->auth.data.length;
842 if (sess->dir == DIR_DEC) {
844 old_icv = (uint8_t *)(sge + 1);
845 memcpy(old_icv, sym_op->auth.digest.data,
846 sess->digest_length);
847 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
848 sge->length = sess->digest_length;
849 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
850 sess->digest_length +
853 DPAA2_SET_FLE_FIN(sge);
855 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
856 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
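/* Build a compound FD with scatter/gather FLEs for auth-only sessions on
 * segmented mbufs.
 */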
861 static inline int build_auth_sg_fd(
862 dpaa2_sec_session *sess,
863 struct rte_crypto_op *op,
865 __rte_unused uint16_t bpid)
867 struct rte_crypto_sym_op *sym_op = op->sym;
868 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
869 struct sec_flow_context *flc;
870 struct ctxt_priv *priv = sess->ctxt;
871 int data_len, data_offset;
873 struct rte_mbuf *mbuf;
875 data_len = sym_op->auth.data.length;
876 data_offset = sym_op->auth.data.offset;
878 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
879 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
880 if ((data_len & 7) || (data_offset & 7)) {
881 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
885 data_len = data_len >> 3;
886 data_offset = data_offset >> 3;
889 mbuf = sym_op->m_src;
890 fle = (struct qbman_fle *)rte_malloc(NULL,
891 FLE_SG_MEM_SIZE(mbuf->nb_segs),
892 RTE_CACHE_LINE_SIZE);
893 if (unlikely(!fle)) {
894 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
897 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
898 /* first FLE entry used to store mbuf and session ctxt */
899 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
900 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
905 flc = &priv->flc_desc[DESC_INITFINAL].flc;
907 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
908 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
909 DPAA2_SET_FD_COMPOUND_FMT(fd);
912 DPAA2_SET_FLE_ADDR(op_fle,
913 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
914 op_fle->length = sess->digest_length;
917 DPAA2_SET_FLE_SG_EXT(ip_fle);
918 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
919 ip_fle->length = data_len;
921 if (sess->iv.length) {
924 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
927 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
928 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
930 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
931 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
934 sge->length = sess->iv.length;
936 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
937 ip_fle->length += sge->length;
941 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
942 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
944 if (data_len <= (mbuf->data_len - data_offset)) {
945 sge->length = data_len;
948 sge->length = mbuf->data_len - data_offset;
950 /* remaining i/p segs */
951 while ((data_len = data_len - sge->length) &&
952 (mbuf = mbuf->next)) {
954 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
955 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
956 if (data_len > mbuf->data_len)
957 sge->length = mbuf->data_len;
959 sge->length = data_len;
963 if (sess->dir == DIR_DEC) {
964 /* Digest verification case */
966 old_digest = (uint8_t *)(sge + 1);
967 rte_memcpy(old_digest, sym_op->auth.digest.data,
968 sess->digest_length);
969 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
970 sge->length = sess->digest_length;
971 ip_fle->length += sess->digest_length;
973 DPAA2_SET_FLE_FIN(sge);
974 DPAA2_SET_FLE_FIN(ip_fle);
975 DPAA2_SET_FD_LEN(fd, ip_fle->length);
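/* Build a compound FD for auth-only sessions on contiguous mbufs. */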
981 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
982 struct qbman_fd *fd, uint16_t bpid)
984 struct rte_crypto_sym_op *sym_op = op->sym;
985 struct qbman_fle *fle, *sge;
986 struct sec_flow_context *flc;
987 struct ctxt_priv *priv = sess->ctxt;
988 int data_len, data_offset;
992 data_len = sym_op->auth.data.length;
993 data_offset = sym_op->auth.data.offset;
995 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
996 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
997 if ((data_len & 7) || (data_offset & 7)) {
998 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1002 data_len = data_len >> 3;
1003 data_offset = data_offset >> 3;
1006 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1008 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1011 memset(fle, 0, FLE_POOL_BUF_SIZE);
1012 /* TODO we are using the first FLE entry to store the mbuf.
1013 * Currently we do not know which FLE has the mbuf stored,
1014 * so while retrieving we can go back 1 FLE from the FD ADDR
1015 * to get the mbuf address from the previous FLE.
1016 * A better approach would be to use the inline mbuf
1018 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1019 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1023 if (likely(bpid < MAX_BPID)) {
1024 DPAA2_SET_FD_BPID(fd, bpid);
1025 DPAA2_SET_FLE_BPID(fle, bpid);
1026 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1027 DPAA2_SET_FLE_BPID(sge, bpid);
1028 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1030 DPAA2_SET_FD_IVP(fd);
1031 DPAA2_SET_FLE_IVP(fle);
1032 DPAA2_SET_FLE_IVP((fle + 1));
1033 DPAA2_SET_FLE_IVP(sge);
1034 DPAA2_SET_FLE_IVP((sge + 1));
1037 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1038 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1039 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1040 DPAA2_SET_FD_COMPOUND_FMT(fd);
1042 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1043 fle->length = sess->digest_length;
1046 /* Setting input FLE */
1047 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1048 DPAA2_SET_FLE_SG_EXT(fle);
1049 fle->length = data_len;
1051 if (sess->iv.length) {
1054 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1057 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1058 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1060 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1061 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1064 sge->length = sess->iv.length;
1067 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1068 fle->length = fle->length + sge->length;
1072 /* Setting data to authenticate */
1073 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1074 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1075 sge->length = data_len;
1077 if (sess->dir == DIR_DEC) {
1079 old_digest = (uint8_t *)(sge + 1);
1080 rte_memcpy(old_digest, sym_op->auth.digest.data,
1081 sess->digest_length);
1082 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1083 sge->length = sess->digest_length;
1084 fle->length = fle->length + sess->digest_length;
1087 DPAA2_SET_FLE_FIN(sge);
1088 DPAA2_SET_FLE_FIN(fle);
1089 DPAA2_SET_FD_LEN(fd, fle->length);
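/* Build a compound FD with scatter/gather FLEs for cipher-only sessions on
 * segmented mbufs.
 */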
1095 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1096 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1098 struct rte_crypto_sym_op *sym_op = op->sym;
1099 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1100 int data_len, data_offset;
1101 struct sec_flow_context *flc;
1102 struct ctxt_priv *priv = sess->ctxt;
1103 struct rte_mbuf *mbuf;
1104 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1107 data_len = sym_op->cipher.data.length;
1108 data_offset = sym_op->cipher.data.offset;
1110 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1111 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1112 if ((data_len & 7) || (data_offset & 7)) {
1113 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1117 data_len = data_len >> 3;
1118 data_offset = data_offset >> 3;
1122 mbuf = sym_op->m_dst;
1124 mbuf = sym_op->m_src;
1126 /* first FLE entry used to store mbuf and session ctxt */
1127 fle = (struct qbman_fle *)rte_malloc(NULL,
1128 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1129 RTE_CACHE_LINE_SIZE);
1131 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1134 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1135 /* first FLE entry used to store mbuf and session ctxt */
1136 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1137 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1143 flc = &priv->flc_desc[0].flc;
1146 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1147 " data_off: 0x%x\n",
1151 sym_op->m_src->data_off);
1154 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1155 op_fle->length = data_len;
1156 DPAA2_SET_FLE_SG_EXT(op_fle);
1159 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1160 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1161 sge->length = mbuf->data_len - data_offset;
1167 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1168 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1169 sge->length = mbuf->data_len;
1172 DPAA2_SET_FLE_FIN(sge);
1175 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1176 flc, fle, fle->addr_hi, fle->addr_lo,
1180 mbuf = sym_op->m_src;
1182 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1183 ip_fle->length = sess->iv.length + data_len;
1184 DPAA2_SET_FLE_SG_EXT(ip_fle);
1187 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1188 DPAA2_SET_FLE_OFFSET(sge, 0);
1189 sge->length = sess->iv.length;
1194 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1195 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1196 sge->length = mbuf->data_len - data_offset;
1202 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1203 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1204 sge->length = mbuf->data_len;
1207 DPAA2_SET_FLE_FIN(sge);
1208 DPAA2_SET_FLE_FIN(ip_fle);
1211 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1212 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1213 DPAA2_SET_FD_COMPOUND_FMT(fd);
1214 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1217 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1218 " off =%d, len =%d\n",
1219 DPAA2_GET_FD_ADDR(fd),
1220 DPAA2_GET_FD_BPID(fd),
1221 rte_dpaa2_bpid_info[bpid].meta_data_size,
1222 DPAA2_GET_FD_OFFSET(fd),
1223 DPAA2_GET_FD_LEN(fd));
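/* Build a compound FD for cipher-only sessions on contiguous mbufs. */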
1228 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1229 struct qbman_fd *fd, uint16_t bpid)
1231 struct rte_crypto_sym_op *sym_op = op->sym;
1232 struct qbman_fle *fle, *sge;
1233 int retval, data_len, data_offset;
1234 struct sec_flow_context *flc;
1235 struct ctxt_priv *priv = sess->ctxt;
1236 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1238 struct rte_mbuf *dst;
1240 data_len = sym_op->cipher.data.length;
1241 data_offset = sym_op->cipher.data.offset;
1243 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1244 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1245 if ((data_len & 7) || (data_offset & 7)) {
1246 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1250 data_len = data_len >> 3;
1251 data_offset = data_offset >> 3;
1255 dst = sym_op->m_dst;
1257 dst = sym_op->m_src;
1259 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1261 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1264 memset(fle, 0, FLE_POOL_BUF_SIZE);
1265 /* TODO we are using the first FLE entry to store the mbuf.
1266 * Currently we do not know which FLE has the mbuf stored,
1267 * so while retrieving we can go back 1 FLE from the FD ADDR
1268 * to get the mbuf address from the previous FLE.
1269 * A better approach would be to use the inline mbuf
1271 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1272 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1276 if (likely(bpid < MAX_BPID)) {
1277 DPAA2_SET_FD_BPID(fd, bpid);
1278 DPAA2_SET_FLE_BPID(fle, bpid);
1279 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1280 DPAA2_SET_FLE_BPID(sge, bpid);
1281 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1283 DPAA2_SET_FD_IVP(fd);
1284 DPAA2_SET_FLE_IVP(fle);
1285 DPAA2_SET_FLE_IVP((fle + 1));
1286 DPAA2_SET_FLE_IVP(sge);
1287 DPAA2_SET_FLE_IVP((sge + 1));
1290 flc = &priv->flc_desc[0].flc;
1291 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1292 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1293 DPAA2_SET_FD_COMPOUND_FMT(fd);
1294 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1297 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1298 " data_off: 0x%x\n",
1302 sym_op->m_src->data_off);
1304 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1305 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1307 fle->length = data_len + sess->iv.length;
1310 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1311 flc, fle, fle->addr_hi, fle->addr_lo,
1316 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1317 fle->length = data_len + sess->iv.length;
1319 DPAA2_SET_FLE_SG_EXT(fle);
1321 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1322 sge->length = sess->iv.length;
1325 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1326 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1328 sge->length = data_len;
1329 DPAA2_SET_FLE_FIN(sge);
1330 DPAA2_SET_FLE_FIN(fle);
1333 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1334 " off =%d, len =%d\n",
1335 DPAA2_GET_FD_ADDR(fd),
1336 DPAA2_GET_FD_BPID(fd),
1337 rte_dpaa2_bpid_info[bpid].meta_data_size,
1338 DPAA2_GET_FD_OFFSET(fd),
1339 DPAA2_GET_FD_LEN(fd));
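/* Fetch the session from the crypto op and dispatch to the FD builder that
 * matches the session type; the SG variants are used when either the source
 * or the destination mbuf is segmented.
 */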
1345 build_sec_fd(struct rte_crypto_op *op,
1346 struct qbman_fd *fd, uint16_t bpid)
1349 dpaa2_sec_session *sess;
1351 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1352 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1353 op->sym->session, cryptodev_driver_id);
1354 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1355 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1356 op->sym->sec_session);
1360 /* Either the source or the destination buffer is segmented */
1361 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1362 ((op->sym->m_dst != NULL) &&
1363 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1364 switch (sess->ctxt_type) {
1365 case DPAA2_SEC_CIPHER:
1366 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1368 case DPAA2_SEC_AUTH:
1369 ret = build_auth_sg_fd(sess, op, fd, bpid);
1371 case DPAA2_SEC_AEAD:
1372 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1374 case DPAA2_SEC_CIPHER_HASH:
1375 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1377 case DPAA2_SEC_IPSEC:
1378 case DPAA2_SEC_PDCP:
1379 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1381 case DPAA2_SEC_HASH_CIPHER:
1383 DPAA2_SEC_ERR("error: Unsupported session");
1386 switch (sess->ctxt_type) {
1387 case DPAA2_SEC_CIPHER:
1388 ret = build_cipher_fd(sess, op, fd, bpid);
1390 case DPAA2_SEC_AUTH:
1391 ret = build_auth_fd(sess, op, fd, bpid);
1393 case DPAA2_SEC_AEAD:
1394 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1396 case DPAA2_SEC_CIPHER_HASH:
1397 ret = build_authenc_fd(sess, op, fd, bpid);
1399 case DPAA2_SEC_IPSEC:
1400 ret = build_proto_fd(sess, op, fd, bpid);
1402 case DPAA2_SEC_PDCP:
1403 ret = build_proto_compound_fd(sess, op, fd, bpid);
1405 case DPAA2_SEC_HASH_CIPHER:
1407 DPAA2_SEC_ERR("error: Unsupported session");
1414 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1417 /* Function to transmit the frames to the given device and VQ */
1420 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1421 uint32_t frames_to_send;
1422 struct qbman_eq_desc eqdesc;
1423 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1424 struct qbman_swp *swp;
1425 uint16_t num_tx = 0;
1426 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1427 /* TODO: need to support multiple buffer pools */
1429 struct rte_mempool *mb_pool;
1431 if (unlikely(nb_ops == 0))
1434 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1435 DPAA2_SEC_ERR("sessionless crypto op not supported");
1438 /*Prepare enqueue descriptor*/
1439 qbman_eq_desc_clear(&eqdesc);
1440 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1441 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1442 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1444 if (!DPAA2_PER_LCORE_DPIO) {
1445 ret = dpaa2_affine_qbman_swp();
1447 DPAA2_SEC_ERR("Failure in affining portal");
1451 swp = DPAA2_PER_LCORE_PORTAL;
1454 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1455 dpaa2_eqcr_size : nb_ops;
1457 for (loop = 0; loop < frames_to_send; loop++) {
1458 if ((*ops)->sym->m_src->seqn) {
1459 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1461 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1462 DPAA2_PER_LCORE_DQRR_SIZE--;
1463 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1464 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1467 /*Clear the unused FD fields before sending*/
1468 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1469 mb_pool = (*ops)->sym->m_src->pool;
1470 bpid = mempool_to_bpid(mb_pool);
1471 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1473 DPAA2_SEC_ERR("error: Improper packet contents"
1474 " for crypto operation");
1480 while (loop < frames_to_send) {
1481 loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1484 frames_to_send - loop);
1487 num_tx += frames_to_send;
1488 nb_ops -= frames_to_send;
1491 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1492 dpaa2_qp->tx_vq.err_pkts += nb_ops;
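/* Convert a simple (single buffer) FD returned by SEC back into the
 * originating crypto op (protocol offload path) and restore the mbuf fields
 * that were borrowed while building the FD.
 */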
1496 static inline struct rte_crypto_op *
1497 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1499 struct rte_crypto_op *op;
1500 uint16_t len = DPAA2_GET_FD_LEN(fd);
1502 dpaa2_sec_session *sess_priv;
1504 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1505 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1506 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1508 diff = len - mbuf->pkt_len;
1509 mbuf->pkt_len += diff;
1510 mbuf->data_len += diff;
1511 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1512 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1513 op->sym->aead.digest.phys_addr = 0L;
1515 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1516 op->sym->sec_session);
1517 if (sess_priv->dir == DIR_ENC)
1518 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1520 mbuf->data_off += SEC_FLC_DHR_INBOUND;
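/* Convert a compound FD returned by SEC back into the originating crypto op,
 * fix up the output mbuf length for protocol offload sessions and free the
 * FLE memory.
 */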
1525 static inline struct rte_crypto_op *
1526 sec_fd_to_mbuf(const struct qbman_fd *fd)
1528 struct qbman_fle *fle;
1529 struct rte_crypto_op *op;
1530 struct ctxt_priv *priv;
1531 struct rte_mbuf *dst, *src;
1533 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1534 return sec_simple_fd_to_mbuf(fd);
1536 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1538 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1539 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1541 /* we are using the first FLE entry to store the mbuf.
1542 * Currently we do not know which FLE has the mbuf stored,
1543 * so while retrieving we can go back 1 FLE from the FD ADDR
1544 * to get the mbuf address from the previous FLE.
1545 * A better approach would be to use the inline mbuf
1548 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1549 /* TODO complete it. */
1550 DPAA2_SEC_ERR("error: non inline buffer");
1553 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1556 src = op->sym->m_src;
1559 if (op->sym->m_dst) {
1560 dst = op->sym->m_dst;
1565 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1566 dpaa2_sec_session *sess = (dpaa2_sec_session *)
1567 get_sec_session_private_data(op->sym->sec_session);
1568 if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
1569 sess->ctxt_type == DPAA2_SEC_PDCP) {
1570 uint16_t len = DPAA2_GET_FD_LEN(fd);
1572 while (dst->next != NULL) {
1573 len -= dst->data_len;
1576 dst->data_len = len;
1580 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1581 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1584 DPAA2_GET_FD_ADDR(fd),
1585 DPAA2_GET_FD_BPID(fd),
1586 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1587 DPAA2_GET_FD_OFFSET(fd),
1588 DPAA2_GET_FD_LEN(fd));
1590 /* free the fle memory */
1591 if (likely(rte_pktmbuf_is_contiguous(src))) {
1592 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1593 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1595 rte_free((void *)(fle-1));
1601 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1604 /* Function responsible for receiving frames for a given device and VQ */
1605 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1606 struct qbman_result *dq_storage;
1607 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1608 int ret, num_rx = 0;
1609 uint8_t is_last = 0, status;
1610 struct qbman_swp *swp;
1611 const struct qbman_fd *fd;
1612 struct qbman_pull_desc pulldesc;
1614 if (!DPAA2_PER_LCORE_DPIO) {
1615 ret = dpaa2_affine_qbman_swp();
1617 DPAA2_SEC_ERR("Failure in affining portal");
1621 swp = DPAA2_PER_LCORE_PORTAL;
1622 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1624 qbman_pull_desc_clear(&pulldesc);
1625 qbman_pull_desc_set_numframes(&pulldesc,
1626 (nb_ops > dpaa2_dqrr_size) ?
1627 dpaa2_dqrr_size : nb_ops);
1628 qbman_pull_desc_set_fq(&pulldesc, fqid);
1629 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1630 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1633 /*Issue a volatile dequeue command. */
1635 if (qbman_swp_pull(swp, &pulldesc)) {
1637 "SEC VDQ command is not issued : QBMAN busy");
1638 /* Portal was busy, try again */
1644 /* Receive the packets till Last Dequeue entry is found with
1645 * respect to the above issued PULL command.
1648 /* Check if the previously issued command is completed.
1649 * Also seems like the SWP is shared between the Ethernet Driver
1650 * and the SEC driver.
1652 while (!qbman_check_command_complete(dq_storage))
1655 /* Loop until the dq_storage is updated with
1656 * new token by QBMAN
1658 while (!qbman_check_new_result(dq_storage))
1660 /* Check whether the last pull command has expired and
1661 * set the condition for loop termination
1663 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1665 /* Check for valid frame. */
1666 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1668 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1669 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1674 fd = qbman_result_DQ_fd(dq_storage);
1675 ops[num_rx] = sec_fd_to_mbuf(fd);
1677 if (unlikely(fd->simple.frc)) {
1678 /* TODO Parse SEC errors */
1679 DPAA2_SEC_ERR("SEC returned Error - %x",
1681 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1683 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1688 } /* End of Packet Rx loop */
1690 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1692 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1693 /*Return the total number of packets received to DPAA2 app*/
1697 /** Release queue pair */
1699 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1701 struct dpaa2_sec_qp *qp =
1702 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1704 PMD_INIT_FUNC_TRACE();
1706 if (qp->rx_vq.q_storage) {
1707 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1708 rte_free(qp->rx_vq.q_storage);
1712 dev->data->queue_pairs[queue_pair_id] = NULL;
1717 /** Setup a queue pair */
1719 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1720 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1721 __rte_unused int socket_id)
1723 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1724 struct dpaa2_sec_qp *qp;
1725 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1726 struct dpseci_rx_queue_cfg cfg;
1729 PMD_INIT_FUNC_TRACE();
1731 /* If qp is already in use free ring memory and qp metadata. */
1732 if (dev->data->queue_pairs[qp_id] != NULL) {
1733 DPAA2_SEC_INFO("QP already setup");
1737 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1738 dev, qp_id, qp_conf);
1740 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1742 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1743 RTE_CACHE_LINE_SIZE);
1745 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1749 qp->rx_vq.crypto_data = dev->data;
1750 qp->tx_vq.crypto_data = dev->data;
1751 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1752 sizeof(struct queue_storage_info_t),
1753 RTE_CACHE_LINE_SIZE);
1754 if (!qp->rx_vq.q_storage) {
1755 DPAA2_SEC_ERR("malloc failed for q_storage");
1758 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1760 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1761 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1765 dev->data->queue_pairs[qp_id] = qp;
1767 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1768 cfg.user_ctx = (size_t)(&qp->rx_vq);
1769 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1774 /** Return the number of allocated queue pairs */
1776 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1778 PMD_INIT_FUNC_TRACE();
1780 return dev->data->nb_queue_pairs;
1783 /** Returns the size of the dpaa2_sec session structure */
1785 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1787 PMD_INIT_FUNC_TRACE();
1789 return sizeof(dpaa2_sec_session);
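/* Prepare a cipher-only session: copy the key, record the IV parameters and
 * construct the shared descriptor for the requested algorithm.
 */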
1793 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1794 struct rte_crypto_sym_xform *xform,
1795 dpaa2_sec_session *session)
1797 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1798 struct alginfo cipherdata;
1800 struct ctxt_priv *priv;
1801 struct sec_flow_context *flc;
1803 PMD_INIT_FUNC_TRACE();
1805 /* For SEC CIPHER only one descriptor is required. */
1806 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1807 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1808 RTE_CACHE_LINE_SIZE);
1810 DPAA2_SEC_ERR("No Memory for priv CTXT");
1814 priv->fle_pool = dev_priv->fle_pool;
1816 flc = &priv->flc_desc[0].flc;
1818 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1819 RTE_CACHE_LINE_SIZE);
1820 if (session->cipher_key.data == NULL) {
1821 DPAA2_SEC_ERR("No Memory for cipher key");
1825 session->cipher_key.length = xform->cipher.key.length;
1827 memcpy(session->cipher_key.data, xform->cipher.key.data,
1828 xform->cipher.key.length);
1829 cipherdata.key = (size_t)session->cipher_key.data;
1830 cipherdata.keylen = session->cipher_key.length;
1831 cipherdata.key_enc_flags = 0;
1832 cipherdata.key_type = RTA_DATA_IMM;
1834 /* Set IV parameters */
1835 session->iv.offset = xform->cipher.iv.offset;
1836 session->iv.length = xform->cipher.iv.length;
1837 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1840 switch (xform->cipher.algo) {
1841 case RTE_CRYPTO_CIPHER_AES_CBC:
1842 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1843 cipherdata.algmode = OP_ALG_AAI_CBC;
1844 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1845 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1846 SHR_NEVER, &cipherdata, NULL,
1850 case RTE_CRYPTO_CIPHER_3DES_CBC:
1851 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1852 cipherdata.algmode = OP_ALG_AAI_CBC;
1853 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1854 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1855 SHR_NEVER, &cipherdata, NULL,
1859 case RTE_CRYPTO_CIPHER_AES_CTR:
1860 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1861 cipherdata.algmode = OP_ALG_AAI_CTR;
1862 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1863 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1864 SHR_NEVER, &cipherdata, NULL,
1868 case RTE_CRYPTO_CIPHER_3DES_CTR:
1869 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1870 cipherdata.algmode = OP_ALG_AAI_CTR;
1871 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
1872 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1873 SHR_NEVER, &cipherdata, NULL,
1877 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1878 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1879 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1880 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1884 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1885 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1886 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1887 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1891 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1892 case RTE_CRYPTO_CIPHER_AES_F8:
1893 case RTE_CRYPTO_CIPHER_AES_ECB:
1894 case RTE_CRYPTO_CIPHER_3DES_ECB:
1895 case RTE_CRYPTO_CIPHER_AES_XTS:
1896 case RTE_CRYPTO_CIPHER_ARC4:
1897 case RTE_CRYPTO_CIPHER_NULL:
1898 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1899 xform->cipher.algo);
1902 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1903 xform->cipher.algo);
1908 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1912 flc->word1_sdl = (uint8_t)bufsize;
1913 session->ctxt = priv;
1915 #ifdef CAAM_DESC_DEBUG
1917 for (i = 0; i < bufsize; i++)
1918 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1923 rte_free(session->cipher_key.data);
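/* Prepare an auth-only session: copy the key, record the digest/IV
 * parameters and construct the shared descriptor for the requested algorithm.
 */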
1929 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1930 struct rte_crypto_sym_xform *xform,
1931 dpaa2_sec_session *session)
1933 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1934 struct alginfo authdata;
1936 struct ctxt_priv *priv;
1937 struct sec_flow_context *flc;
1939 PMD_INIT_FUNC_TRACE();
1941 /* For SEC AUTH three descriptors are required for various stages */
1942 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1943 sizeof(struct ctxt_priv) + 3 *
1944 sizeof(struct sec_flc_desc),
1945 RTE_CACHE_LINE_SIZE);
1947 DPAA2_SEC_ERR("No Memory for priv CTXT");
1951 priv->fle_pool = dev_priv->fle_pool;
1952 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1954 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1955 RTE_CACHE_LINE_SIZE);
1956 if (session->auth_key.data == NULL) {
1957 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1961 session->auth_key.length = xform->auth.key.length;
1963 memcpy(session->auth_key.data, xform->auth.key.data,
1964 xform->auth.key.length);
1965 authdata.key = (size_t)session->auth_key.data;
1966 authdata.keylen = session->auth_key.length;
1967 authdata.key_enc_flags = 0;
1968 authdata.key_type = RTA_DATA_IMM;
1970 session->digest_length = xform->auth.digest_length;
1971 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1974 switch (xform->auth.algo) {
1975 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1976 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1977 authdata.algmode = OP_ALG_AAI_HMAC;
1978 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1979 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1980 1, 0, SHR_NEVER, &authdata,
1982 session->digest_length);
1984 case RTE_CRYPTO_AUTH_MD5_HMAC:
1985 authdata.algtype = OP_ALG_ALGSEL_MD5;
1986 authdata.algmode = OP_ALG_AAI_HMAC;
1987 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1988 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1989 1, 0, SHR_NEVER, &authdata,
1991 session->digest_length);
1993 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1994 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1995 authdata.algmode = OP_ALG_AAI_HMAC;
1996 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1997 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1998 1, 0, SHR_NEVER, &authdata,
2000 session->digest_length);
2002 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2003 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2004 authdata.algmode = OP_ALG_AAI_HMAC;
2005 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2006 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2007 1, 0, SHR_NEVER, &authdata,
2009 session->digest_length);
2011 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2012 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2013 authdata.algmode = OP_ALG_AAI_HMAC;
2014 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2015 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2016 1, 0, SHR_NEVER, &authdata,
2018 session->digest_length);
2020 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2021 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2022 authdata.algmode = OP_ALG_AAI_HMAC;
2023 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2024 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2025 1, 0, SHR_NEVER, &authdata,
2027 session->digest_length);
2029 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2030 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2031 authdata.algmode = OP_ALG_AAI_F9;
2032 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2033 session->iv.offset = xform->auth.iv.offset;
2034 session->iv.length = xform->auth.iv.length;
2035 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2038 session->digest_length);
2040 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2041 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2042 authdata.algmode = OP_ALG_AAI_F9;
2043 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2044 session->iv.offset = xform->auth.iv.offset;
2045 session->iv.length = xform->auth.iv.length;
2046 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2049 session->digest_length);
2051 case RTE_CRYPTO_AUTH_KASUMI_F9:
2052 case RTE_CRYPTO_AUTH_NULL:
2053 case RTE_CRYPTO_AUTH_SHA1:
2054 case RTE_CRYPTO_AUTH_SHA256:
2055 case RTE_CRYPTO_AUTH_SHA512:
2056 case RTE_CRYPTO_AUTH_SHA224:
2057 case RTE_CRYPTO_AUTH_SHA384:
2058 case RTE_CRYPTO_AUTH_MD5:
2059 case RTE_CRYPTO_AUTH_AES_GMAC:
2060 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2061 case RTE_CRYPTO_AUTH_AES_CMAC:
2062 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2063 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2067 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2073 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2077 flc->word1_sdl = (uint8_t)bufsize;
2078 session->ctxt = priv;
2079 #ifdef CAAM_DESC_DEBUG
2081 for (i = 0; i < bufsize; i++)
2082 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2083 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2089 rte_free(session->auth_key.data);
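/* Prepare an AEAD session (AES-GCM is the only algorithm handled here):
 * copy the key, record the IV/digest/AAD parameters and construct the GCM
 * encap or decap shared descriptor.
 */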
2095 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2096 struct rte_crypto_sym_xform *xform,
2097 dpaa2_sec_session *session)
2099 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2100 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2101 struct alginfo aeaddata;
2103 struct ctxt_priv *priv;
2104 struct sec_flow_context *flc;
2105 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2108 PMD_INIT_FUNC_TRACE();
2110 /* Set IV parameters */
2111 session->iv.offset = aead_xform->iv.offset;
2112 session->iv.length = aead_xform->iv.length;
2113 session->ctxt_type = DPAA2_SEC_AEAD;
2115 /* For SEC AEAD only one descriptor is required */
2116 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2117 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2118 RTE_CACHE_LINE_SIZE);
2120 DPAA2_SEC_ERR("No Memory for priv CTXT");
2124 priv->fle_pool = dev_priv->fle_pool;
2125 flc = &priv->flc_desc[0].flc;
2127 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2128 RTE_CACHE_LINE_SIZE);
2129 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2130 DPAA2_SEC_ERR("No Memory for aead key");
2134 memcpy(session->aead_key.data, aead_xform->key.data,
2135 aead_xform->key.length);
2137 session->digest_length = aead_xform->digest_length;
2138 session->aead_key.length = aead_xform->key.length;
2139 ctxt->auth_only_len = aead_xform->aad_length;
2141 aeaddata.key = (size_t)session->aead_key.data;
2142 aeaddata.keylen = session->aead_key.length;
2143 aeaddata.key_enc_flags = 0;
2144 aeaddata.key_type = RTA_DATA_IMM;
2146 switch (aead_xform->algo) {
2147 case RTE_CRYPTO_AEAD_AES_GCM:
2148 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2149 aeaddata.algmode = OP_ALG_AAI_GCM;
2150 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2152 case RTE_CRYPTO_AEAD_AES_CCM:
2153 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2157 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2161 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2164 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2165 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2167 (unsigned int *)priv->flc_desc[0].desc,
2168 &priv->flc_desc[0].desc[1], 1);
2171 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2174 if (priv->flc_desc[0].desc[1] & 1) {
2175 aeaddata.key_type = RTA_DATA_IMM;
2177 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2178 aeaddata.key_type = RTA_DATA_PTR;
2180 priv->flc_desc[0].desc[0] = 0;
2181 priv->flc_desc[0].desc[1] = 0;
2183 if (session->dir == DIR_ENC)
2184 bufsize = cnstr_shdsc_gcm_encap(
2185 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2186 &aeaddata, session->iv.length,
2187 session->digest_length);
2189 bufsize = cnstr_shdsc_gcm_decap(
2190 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2191 &aeaddata, session->iv.length,
2192 session->digest_length);
2194 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2198 flc->word1_sdl = (uint8_t)bufsize;
2199 session->ctxt = priv;
2200 #ifdef CAAM_DESC_DEBUG
2202 for (i = 0; i < bufsize; i++)
2203 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2204 i, priv->flc_desc[0].desc[i]);
2209 rte_free(session->aead_key.data);
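/*
 * A sketch of the AEAD transform this init path expects (AES-GCM is the only
 * algorithm handled here); "gcm_key" and "IV_OFFSET" are application-provided
 * placeholders:
 *
 *   struct rte_crypto_sym_xform aead_xform = {
 *       .type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *       .next = NULL,
 *       .aead = {
 *           .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *           .algo = RTE_CRYPTO_AEAD_AES_GCM,
 *           .key = { .data = gcm_key, .length = 16 },
 *           .iv = { .offset = IV_OFFSET, .length = 12 },
 *           .digest_length = 16,
 *           .aad_length = 8,
 *       },
 *   };
 */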
2216 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2217 struct rte_crypto_sym_xform *xform,
2218 dpaa2_sec_session *session)
2220 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2221 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2222 struct alginfo authdata, cipherdata;
2224 struct ctxt_priv *priv;
2225 struct sec_flow_context *flc;
2226 struct rte_crypto_cipher_xform *cipher_xform;
2227 struct rte_crypto_auth_xform *auth_xform;
2230 PMD_INIT_FUNC_TRACE();
2232 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2233 cipher_xform = &xform->cipher;
2234 auth_xform = &xform->next->auth;
2235 session->ctxt_type =
2236 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2237 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2239 cipher_xform = &xform->next->cipher;
2240 auth_xform = &xform->auth;
2241 session->ctxt_type =
2242 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2243 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2246 /* Set IV parameters */
2247 session->iv.offset = cipher_xform->iv.offset;
2248 session->iv.length = cipher_xform->iv.length;
2250 /* For SEC AEAD only one descriptor is required */
2251 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2252 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2253 RTE_CACHE_LINE_SIZE);
2255 DPAA2_SEC_ERR("No Memory for priv CTXT");
2259 priv->fle_pool = dev_priv->fle_pool;
2260 flc = &priv->flc_desc[0].flc;
2262 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2263 RTE_CACHE_LINE_SIZE);
2264 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2265 DPAA2_SEC_ERR("No Memory for cipher key");
2269 session->cipher_key.length = cipher_xform->key.length;
2270 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2271 RTE_CACHE_LINE_SIZE);
2272 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2273 DPAA2_SEC_ERR("No Memory for auth key");
2274 rte_free(session->cipher_key.data);
2278 session->auth_key.length = auth_xform->key.length;
2279 memcpy(session->cipher_key.data, cipher_xform->key.data,
2280 cipher_xform->key.length);
2281 memcpy(session->auth_key.data, auth_xform->key.data,
2282 auth_xform->key.length);
2284 authdata.key = (size_t)session->auth_key.data;
2285 authdata.keylen = session->auth_key.length;
2286 authdata.key_enc_flags = 0;
2287 authdata.key_type = RTA_DATA_IMM;
2289 session->digest_length = auth_xform->digest_length;
2291 switch (auth_xform->algo) {
2292 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2293 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2294 authdata.algmode = OP_ALG_AAI_HMAC;
2295 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2297 case RTE_CRYPTO_AUTH_MD5_HMAC:
2298 authdata.algtype = OP_ALG_ALGSEL_MD5;
2299 authdata.algmode = OP_ALG_AAI_HMAC;
2300 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2302 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2303 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2304 authdata.algmode = OP_ALG_AAI_HMAC;
2305 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2307 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2308 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2309 authdata.algmode = OP_ALG_AAI_HMAC;
2310 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2312 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2313 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2314 authdata.algmode = OP_ALG_AAI_HMAC;
2315 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2317 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2318 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2319 authdata.algmode = OP_ALG_AAI_HMAC;
2320 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2322 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2323 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2324 case RTE_CRYPTO_AUTH_NULL:
2325 case RTE_CRYPTO_AUTH_SHA1:
2326 case RTE_CRYPTO_AUTH_SHA256:
2327 case RTE_CRYPTO_AUTH_SHA512:
2328 case RTE_CRYPTO_AUTH_SHA224:
2329 case RTE_CRYPTO_AUTH_SHA384:
2330 case RTE_CRYPTO_AUTH_MD5:
2331 case RTE_CRYPTO_AUTH_AES_GMAC:
2332 case RTE_CRYPTO_AUTH_KASUMI_F9:
2333 case RTE_CRYPTO_AUTH_AES_CMAC:
2334 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2335 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2336 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2340 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2344 cipherdata.key = (size_t)session->cipher_key.data;
2345 cipherdata.keylen = session->cipher_key.length;
2346 cipherdata.key_enc_flags = 0;
2347 cipherdata.key_type = RTA_DATA_IMM;
2349 switch (cipher_xform->algo) {
2350 case RTE_CRYPTO_CIPHER_AES_CBC:
2351 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2352 cipherdata.algmode = OP_ALG_AAI_CBC;
2353 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2355 case RTE_CRYPTO_CIPHER_3DES_CBC:
2356 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2357 cipherdata.algmode = OP_ALG_AAI_CBC;
2358 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2360 case RTE_CRYPTO_CIPHER_AES_CTR:
2361 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2362 cipherdata.algmode = OP_ALG_AAI_CTR;
2363 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2365 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2366 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2367 case RTE_CRYPTO_CIPHER_NULL:
2368 case RTE_CRYPTO_CIPHER_3DES_ECB:
2369 case RTE_CRYPTO_CIPHER_AES_ECB:
2370 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2371 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2372 cipher_xform->algo);
2375 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2376 cipher_xform->algo);
2379 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2382 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2383 priv->flc_desc[0].desc[1] = authdata.keylen;
2384 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2386 (unsigned int *)priv->flc_desc[0].desc,
2387 &priv->flc_desc[0].desc[2], 2);
2390 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2393 if (priv->flc_desc[0].desc[2] & 1) {
2394 cipherdata.key_type = RTA_DATA_IMM;
2396 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2397 cipherdata.key_type = RTA_DATA_PTR;
2399 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2400 authdata.key_type = RTA_DATA_IMM;
2402 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2403 authdata.key_type = RTA_DATA_PTR;
2405 priv->flc_desc[0].desc[0] = 0;
2406 priv->flc_desc[0].desc[1] = 0;
2407 priv->flc_desc[0].desc[2] = 0;
2409 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2410 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2412 &cipherdata, &authdata,
2414 ctxt->auth_only_len,
2415 session->digest_length,
2418 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2422 DPAA2_SEC_ERR("Hash before cipher not supported");
2426 flc->word1_sdl = (uint8_t)bufsize;
2427 session->ctxt = priv;
2428 #ifdef CAAM_DESC_DEBUG
2430 for (i = 0; i < bufsize; i++)
2431 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2432 i, priv->flc_desc[0].desc[i]);
2438 rte_free(session->cipher_key.data);
2439 rte_free(session->auth_key.data);
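/*
 * The chained (cipher + auth) case above is driven by a two-element xform
 * list. A sketch assuming application-owned "aes_key", "hmac_key" and
 * "IV_OFFSET"; cipher followed by auth on encrypt maps to
 * DPAA2_SEC_CIPHER_HASH:
 *
 *   struct rte_crypto_sym_xform auth_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *       .auth = {
 *           .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *           .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *           .key = { .data = hmac_key, .length = 64 },
 *           .digest_length = 20,
 *       },
 *   };
 *   struct rte_crypto_sym_xform cipher_xf = {
 *       .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *       .next = &auth_xf,
 *       .cipher = {
 *           .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *           .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *           .key = { .data = aes_key, .length = 16 },
 *           .iv = { .offset = IV_OFFSET, .length = 16 },
 *       },
 *   };
 */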
2445 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2446 struct rte_crypto_sym_xform *xform, void *sess)
2448 dpaa2_sec_session *session = sess;
2451 PMD_INIT_FUNC_TRACE();
2453 if (unlikely(sess == NULL)) {
2454 DPAA2_SEC_ERR("Invalid session struct");
2458 memset(session, 0, sizeof(dpaa2_sec_session));
2459 /* Default IV length = 0 */
2460 session->iv.length = 0;
2463 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2464 session->ctxt_type = DPAA2_SEC_CIPHER;
2465 ret = dpaa2_sec_cipher_init(dev, xform, session);
2467 /* Authentication Only */
2468 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2469 xform->next == NULL) {
2470 session->ctxt_type = DPAA2_SEC_AUTH;
2471 ret = dpaa2_sec_auth_init(dev, xform, session);
2473 /* Cipher then Authenticate */
2474 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2475 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2476 session->ext_params.aead_ctxt.auth_cipher_text = true;
2477 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2479 /* Authenticate then Cipher */
2480 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2481 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2482 session->ext_params.aead_ctxt.auth_cipher_text = false;
2483 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2485 /* AEAD operation for AES-GCM kind of Algorithms */
2486 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2487 xform->next == NULL) {
2488 ret = dpaa2_sec_aead_init(dev, xform, session);
2491 DPAA2_SEC_ERR("Invalid crypto type");
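/*
 * dpaa2_sec_set_session_parameters() is reached through the generic cryptodev
 * session API. A sketch of the caller side, assuming application-owned
 * "dev_id", "sess_mp" and "priv_mp" and the chained cipher_xf sketched above:
 *
 *   struct rte_cryptodev_sym_session *sess =
 *       rte_cryptodev_sym_session_create(sess_mp);
 *   if (sess != NULL &&
 *       rte_cryptodev_sym_session_init(dev_id, sess,
 *                                      &cipher_xf, priv_mp) == 0) {
 *       ...attach sess to each rte_crypto_op before enqueueing...
 *   }
 */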
2499 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2500 dpaa2_sec_session *session,
2501 struct alginfo *aeaddata)
2503 PMD_INIT_FUNC_TRACE();
2505 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2506 RTE_CACHE_LINE_SIZE);
2507 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2508 DPAA2_SEC_ERR("No Memory for aead key");
2511 memcpy(session->aead_key.data, aead_xform->key.data,
2512 aead_xform->key.length);
2514 session->digest_length = aead_xform->digest_length;
2515 session->aead_key.length = aead_xform->key.length;
2517 aeaddata->key = (size_t)session->aead_key.data;
2518 aeaddata->keylen = session->aead_key.length;
2519 aeaddata->key_enc_flags = 0;
2520 aeaddata->key_type = RTA_DATA_IMM;
2522 switch (aead_xform->algo) {
2523 case RTE_CRYPTO_AEAD_AES_GCM:
2524 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2525 aeaddata->algmode = OP_ALG_AAI_GCM;
2526 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2528 case RTE_CRYPTO_AEAD_AES_CCM:
2529 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2530 aeaddata->algmode = OP_ALG_AAI_CCM;
2531 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2534 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2538 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2545 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2546 struct rte_crypto_auth_xform *auth_xform,
2547 dpaa2_sec_session *session,
2548 struct alginfo *cipherdata,
2549 struct alginfo *authdata)
2552 session->cipher_key.data = rte_zmalloc(NULL,
2553 cipher_xform->key.length,
2554 RTE_CACHE_LINE_SIZE);
2555 if (session->cipher_key.data == NULL &&
2556 cipher_xform->key.length > 0) {
2557 DPAA2_SEC_ERR("No Memory for cipher key");
2561 session->cipher_key.length = cipher_xform->key.length;
2562 memcpy(session->cipher_key.data, cipher_xform->key.data,
2563 cipher_xform->key.length);
2564 session->cipher_alg = cipher_xform->algo;
2566 session->cipher_key.data = NULL;
2567 session->cipher_key.length = 0;
2568 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2572 session->auth_key.data = rte_zmalloc(NULL,
2573 auth_xform->key.length,
2574 RTE_CACHE_LINE_SIZE);
2575 if (session->auth_key.data == NULL &&
2576 auth_xform->key.length > 0) {
2577 DPAA2_SEC_ERR("No Memory for auth key");
2580 session->auth_key.length = auth_xform->key.length;
2581 memcpy(session->auth_key.data, auth_xform->key.data,
2582 auth_xform->key.length);
2583 session->auth_alg = auth_xform->algo;
2585 session->auth_key.data = NULL;
2586 session->auth_key.length = 0;
2587 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2590 authdata->key = (size_t)session->auth_key.data;
2591 authdata->keylen = session->auth_key.length;
2592 authdata->key_enc_flags = 0;
2593 authdata->key_type = RTA_DATA_IMM;
2594 switch (session->auth_alg) {
2595 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2596 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2597 authdata->algmode = OP_ALG_AAI_HMAC;
2599 case RTE_CRYPTO_AUTH_MD5_HMAC:
2600 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2601 authdata->algmode = OP_ALG_AAI_HMAC;
2603 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2604 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2605 authdata->algmode = OP_ALG_AAI_HMAC;
2607 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2608 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2609 authdata->algmode = OP_ALG_AAI_HMAC;
2611 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2612 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2613 authdata->algmode = OP_ALG_AAI_HMAC;
2615 case RTE_CRYPTO_AUTH_AES_CMAC:
2616 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2618 case RTE_CRYPTO_AUTH_NULL:
2619 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2621 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2622 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2623 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2624 case RTE_CRYPTO_AUTH_SHA1:
2625 case RTE_CRYPTO_AUTH_SHA256:
2626 case RTE_CRYPTO_AUTH_SHA512:
2627 case RTE_CRYPTO_AUTH_SHA224:
2628 case RTE_CRYPTO_AUTH_SHA384:
2629 case RTE_CRYPTO_AUTH_MD5:
2630 case RTE_CRYPTO_AUTH_AES_GMAC:
2631 case RTE_CRYPTO_AUTH_KASUMI_F9:
2632 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2633 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2634 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2638 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2642 cipherdata->key = (size_t)session->cipher_key.data;
2643 cipherdata->keylen = session->cipher_key.length;
2644 cipherdata->key_enc_flags = 0;
2645 cipherdata->key_type = RTA_DATA_IMM;
2647 switch (session->cipher_alg) {
2648 case RTE_CRYPTO_CIPHER_AES_CBC:
2649 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2650 cipherdata->algmode = OP_ALG_AAI_CBC;
2652 case RTE_CRYPTO_CIPHER_3DES_CBC:
2653 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2654 cipherdata->algmode = OP_ALG_AAI_CBC;
2656 case RTE_CRYPTO_CIPHER_AES_CTR:
2657 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2658 cipherdata->algmode = OP_ALG_AAI_CTR;
2660 case RTE_CRYPTO_CIPHER_NULL:
2661 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2663 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2664 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2665 case RTE_CRYPTO_CIPHER_3DES_ECB:
2666 case RTE_CRYPTO_CIPHER_AES_ECB:
2667 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2668 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2669 session->cipher_alg);
2672 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2673 session->cipher_alg);
2680 #ifdef RTE_LIBRTE_SECURITY_TEST
2681 static uint8_t aes_cbc_iv[] = {
2682 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2683 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2687 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2688 struct rte_security_session_conf *conf,
2691 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2692 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2693 struct rte_crypto_auth_xform *auth_xform = NULL;
2694 struct rte_crypto_aead_xform *aead_xform = NULL;
2695 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2696 struct ctxt_priv *priv;
2697 struct ipsec_encap_pdb encap_pdb;
2698 struct ipsec_decap_pdb decap_pdb;
2699 struct alginfo authdata, cipherdata;
2701 struct sec_flow_context *flc;
2702 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2705 PMD_INIT_FUNC_TRACE();
2707 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2708 sizeof(struct ctxt_priv) +
2709 sizeof(struct sec_flc_desc),
2710 RTE_CACHE_LINE_SIZE);
2713 DPAA2_SEC_ERR("No memory for priv CTXT");
2717 priv->fle_pool = dev_priv->fle_pool;
2718 flc = &priv->flc_desc[0].flc;
2720 memset(session, 0, sizeof(dpaa2_sec_session));
2722 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2723 cipher_xform = &conf->crypto_xform->cipher;
2724 if (conf->crypto_xform->next)
2725 auth_xform = &conf->crypto_xform->next->auth;
2726 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2727 session, &cipherdata, &authdata);
2728 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2729 auth_xform = &conf->crypto_xform->auth;
2730 if (conf->crypto_xform->next)
2731 cipher_xform = &conf->crypto_xform->next->cipher;
2732 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2733 session, &cipherdata, &authdata);
2734 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2735 aead_xform = &conf->crypto_xform->aead;
2736 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2737 session, &cipherdata);
2739 DPAA2_SEC_ERR("XFORM not specified");
2744 DPAA2_SEC_ERR("Failed to process xform");
2748 session->ctxt_type = DPAA2_SEC_IPSEC;
2749 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2750 uint8_t *hdr = NULL;
2752 struct rte_ipv6_hdr ip6_hdr;
2754 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2755 /* For Sec Proto only one descriptor is required. */
2756 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2757 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2758 PDBOPTS_ESP_OIHI_PDB_INL |
2760 PDBHMO_ESP_ENCAP_DTTL |
2762 if (ipsec_xform->options.esn)
2763 encap_pdb.options |= PDBOPTS_ESP_ESN;
2764 encap_pdb.spi = ipsec_xform->spi;
2765 session->dir = DIR_ENC;
2766 if (ipsec_xform->tunnel.type ==
2767 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2768 encap_pdb.ip_hdr_len = sizeof(struct ip);
2769 ip4_hdr.ip_v = IPVERSION;
2771 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2772 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2775 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2776 ip4_hdr.ip_p = IPPROTO_ESP;
2778 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2779 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2780 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2781 &ip4_hdr, sizeof(struct ip));
2782 hdr = (uint8_t *)&ip4_hdr;
2783 } else if (ipsec_xform->tunnel.type ==
2784 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2785 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2786 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2787 ((ipsec_xform->tunnel.ipv6.dscp <<
2788 RTE_IPV6_HDR_TC_SHIFT) &
2789 RTE_IPV6_HDR_TC_MASK) |
2790 ((ipsec_xform->tunnel.ipv6.flabel <<
2791 RTE_IPV6_HDR_FL_SHIFT) &
2792 RTE_IPV6_HDR_FL_MASK));
2793 /* Payload length will be updated by HW */
2794 ip6_hdr.payload_len = 0;
2795 ip6_hdr.hop_limits =
2796 ipsec_xform->tunnel.ipv6.hlimit;
2797 ip6_hdr.proto = (ipsec_xform->proto ==
2798 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2799 IPPROTO_ESP : IPPROTO_AH;
2800 memcpy(&ip6_hdr.src_addr,
2801 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2802 memcpy(&ip6_hdr.dst_addr,
2803 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2804 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2805 hdr = (uint8_t *)&ip6_hdr;
2808 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2809 1, 0, SHR_SERIAL, &encap_pdb,
2810 hdr, &cipherdata, &authdata);
2811 } else if (ipsec_xform->direction ==
2812 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2813 flc->dhr = SEC_FLC_DHR_INBOUND;
2814 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2815 decap_pdb.options = (ipsec_xform->tunnel.type ==
2816 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2817 sizeof(struct ip) << 16 :
2818 sizeof(struct rte_ipv6_hdr) << 16;
2819 if (ipsec_xform->options.esn)
2820 decap_pdb.options |= PDBOPTS_ESP_ESN;
2821 session->dir = DIR_DEC;
2822 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2824 &decap_pdb, &cipherdata, &authdata);
2829 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2833 flc->word1_sdl = (uint8_t)bufsize;
2835 /* Enable the stashing control bit */
2836 DPAA2_SET_FLC_RSC(flc);
2837 flc->word2_rflc_31_0 = lower_32_bits(
2838 (size_t)&(((struct dpaa2_sec_qp *)
2839 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2840 flc->word3_rflc_63_32 = upper_32_bits(
2841 (size_t)&(((struct dpaa2_sec_qp *)
2842 dev->data->queue_pairs[0])->rx_vq));
2844 /* Set EWS bit i.e. enable write-safe */
2845 DPAA2_SET_FLC_EWS(flc);
2846 /* Set BS = 1 i.e reuse input buffers as output buffers */
2847 DPAA2_SET_FLC_REUSE_BS(flc);
2848 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2849 DPAA2_SET_FLC_REUSE_FF(flc);
2851 session->ctxt = priv;
2855 rte_free(session->auth_key.data);
2856 rte_free(session->cipher_key.data);
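/*
 * This routine is exercised through rte_security. A sketch of an egress ESP
 * tunnel configuration (addresses, keys, "dev_id" and "sess_mp" are
 * application-owned placeholders; the crypto_xform could be the AES-GCM
 * aead_xform sketched earlier):
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *       .ipsec = {
 *           .spi = 0x1234,
 *           .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *           .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *           .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *           .tunnel = {
 *               .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *               .ipv4 = { .ttl = 64 },
 *           },
 *       },
 *       .crypto_xform = &aead_xform,
 *   };
 *   struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *   struct rte_security_session *sec_sess =
 *       rte_security_session_create(ctx, &conf, sess_mp);
 */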
2862 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2863 struct rte_security_session_conf *conf,
2866 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2867 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2868 struct rte_crypto_auth_xform *auth_xform = NULL;
2869 struct rte_crypto_cipher_xform *cipher_xform;
2870 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2871 struct ctxt_priv *priv;
2872 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2873 struct alginfo authdata, cipherdata;
2874 struct alginfo *p_authdata = NULL;
2876 struct sec_flow_context *flc;
2877 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2883 PMD_INIT_FUNC_TRACE();
2885 memset(session, 0, sizeof(dpaa2_sec_session));
2887 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2888 sizeof(struct ctxt_priv) +
2889 sizeof(struct sec_flc_desc),
2890 RTE_CACHE_LINE_SIZE);
2893 DPAA2_SEC_ERR("No memory for priv CTXT");
2897 priv->fle_pool = dev_priv->fle_pool;
2898 flc = &priv->flc_desc[0].flc;
2900 /* find xfrm types */
2901 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2902 cipher_xform = &xform->cipher;
2903 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2904 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2905 session->ext_params.aead_ctxt.auth_cipher_text = true;
2906 cipher_xform = &xform->cipher;
2907 auth_xform = &xform->next->auth;
2908 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2909 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2910 session->ext_params.aead_ctxt.auth_cipher_text = false;
2911 cipher_xform = &xform->next->cipher;
2912 auth_xform = &xform->auth;
2914 DPAA2_SEC_ERR("Invalid crypto type");
2918 session->ctxt_type = DPAA2_SEC_PDCP;
2920 session->cipher_key.data = rte_zmalloc(NULL,
2921 cipher_xform->key.length,
2922 RTE_CACHE_LINE_SIZE);
2923 if (session->cipher_key.data == NULL &&
2924 cipher_xform->key.length > 0) {
2925 DPAA2_SEC_ERR("No Memory for cipher key");
2929 session->cipher_key.length = cipher_xform->key.length;
2930 memcpy(session->cipher_key.data, cipher_xform->key.data,
2931 cipher_xform->key.length);
2933 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2935 session->cipher_alg = cipher_xform->algo;
2937 session->cipher_key.data = NULL;
2938 session->cipher_key.length = 0;
2939 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2940 session->dir = DIR_ENC;
2943 session->pdcp.domain = pdcp_xform->domain;
2944 session->pdcp.bearer = pdcp_xform->bearer;
2945 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2946 session->pdcp.sn_size = pdcp_xform->sn_size;
2947 session->pdcp.hfn = pdcp_xform->hfn;
2948 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2949 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2950 /* hfn ovd offset location is stored in iv.offset value */
2951 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2953 cipherdata.key = (size_t)session->cipher_key.data;
2954 cipherdata.keylen = session->cipher_key.length;
2955 cipherdata.key_enc_flags = 0;
2956 cipherdata.key_type = RTA_DATA_IMM;
2958 switch (session->cipher_alg) {
2959 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2960 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2962 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2963 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2965 case RTE_CRYPTO_CIPHER_AES_CTR:
2966 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2968 case RTE_CRYPTO_CIPHER_NULL:
2969 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2972 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2973 session->cipher_alg);
2978 session->auth_key.data = rte_zmalloc(NULL,
2979 auth_xform->key.length,
2980 RTE_CACHE_LINE_SIZE);
2981 if (!session->auth_key.data &&
2982 auth_xform->key.length > 0) {
2983 DPAA2_SEC_ERR("No Memory for auth key");
2984 rte_free(session->cipher_key.data);
2988 session->auth_key.length = auth_xform->key.length;
2989 memcpy(session->auth_key.data, auth_xform->key.data,
2990 auth_xform->key.length);
2991 session->auth_alg = auth_xform->algo;
2993 session->auth_key.data = NULL;
2994 session->auth_key.length = 0;
2995 session->auth_alg = 0;
2997 authdata.key = (size_t)session->auth_key.data;
2998 authdata.keylen = session->auth_key.length;
2999 authdata.key_enc_flags = 0;
3000 authdata.key_type = RTA_DATA_IMM;
3002 if (session->auth_alg) {
3003 switch (session->auth_alg) {
3004 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3005 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3007 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3008 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3010 case RTE_CRYPTO_AUTH_AES_CMAC:
3011 authdata.algtype = PDCP_AUTH_TYPE_AES;
3013 case RTE_CRYPTO_AUTH_NULL:
3014 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3017 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3022 p_authdata = &authdata;
3023 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3024 DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3028 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3029 if (session->dir == DIR_ENC)
3030 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3031 priv->flc_desc[0].desc, 1, swap,
3033 session->pdcp.sn_size,
3035 pdcp_xform->pkt_dir,
3036 pdcp_xform->hfn_threshold,
3037 &cipherdata, &authdata,
3039 else if (session->dir == DIR_DEC)
3040 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3041 priv->flc_desc[0].desc, 1, swap,
3043 session->pdcp.sn_size,
3045 pdcp_xform->pkt_dir,
3046 pdcp_xform->hfn_threshold,
3047 &cipherdata, &authdata,
3050 if (session->dir == DIR_ENC)
3051 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3052 priv->flc_desc[0].desc, 1, swap,
3053 session->pdcp.sn_size,
3056 pdcp_xform->pkt_dir,
3057 pdcp_xform->hfn_threshold,
3058 &cipherdata, p_authdata, 0);
3059 else if (session->dir == DIR_DEC)
3060 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3061 priv->flc_desc[0].desc, 1, swap,
3062 session->pdcp.sn_size,
3065 pdcp_xform->pkt_dir,
3066 pdcp_xform->hfn_threshold,
3067 &cipherdata, p_authdata, 0);
3071 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3075 /* Enable the stashing control bit */
3076 DPAA2_SET_FLC_RSC(flc);
3077 flc->word2_rflc_31_0 = lower_32_bits(
3078 (size_t)&(((struct dpaa2_sec_qp *)
3079 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3080 flc->word3_rflc_63_32 = upper_32_bits(
3081 (size_t)&(((struct dpaa2_sec_qp *)
3082 dev->data->queue_pairs[0])->rx_vq));
3084 flc->word1_sdl = (uint8_t)bufsize;
3086 /* TODO - check the perf impact or
3087 * align as per descriptor type
3088 * Set EWS bit i.e. enable write-safe
3089 * DPAA2_SET_FLC_EWS(flc);
3092 /* Set BS = 1 i.e reuse input buffers as output buffers */
3093 DPAA2_SET_FLC_REUSE_BS(flc);
3094 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3095 DPAA2_SET_FLC_REUSE_FF(flc);
3097 session->ctxt = priv;
3101 rte_free(session->auth_key.data);
3102 rte_free(session->cipher_key.data);
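/*
 * PDCP sessions come in through the same rte_security entry point with
 * protocol RTE_SECURITY_PROTOCOL_PDCP. A control-plane sketch with
 * placeholder values; "cplane_xf" would be a cipher + auth chain such as
 * SNOW 3G UEA2/UIA2:
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *       .pdcp = {
 *           .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
 *           .bearer = 0x3,
 *           .pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *           .sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
 *           .hfn = 0x1,
 *           .hfn_threshold = 0x70C0A,
 *       },
 *       .crypto_xform = &cplane_xf,
 *   };
 */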
3108 dpaa2_sec_security_session_create(void *dev,
3109 struct rte_security_session_conf *conf,
3110 struct rte_security_session *sess,
3111 struct rte_mempool *mempool)
3113 void *sess_private_data;
3114 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3117 if (rte_mempool_get(mempool, &sess_private_data)) {
3118 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3122 switch (conf->protocol) {
3123 case RTE_SECURITY_PROTOCOL_IPSEC:
3124 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3127 case RTE_SECURITY_PROTOCOL_MACSEC:
3129 case RTE_SECURITY_PROTOCOL_PDCP:
3130 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3137 DPAA2_SEC_ERR("Failed to configure session parameters");
3138 /* Return session to mempool */
3139 rte_mempool_put(mempool, sess_private_data);
3143 set_sec_session_private_data(sess, sess_private_data);
3148 /** Clear the memory of session so it doesn't leave key material behind */
3150 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3151 struct rte_security_session *sess)
3153 PMD_INIT_FUNC_TRACE();
3154 void *sess_priv = get_sec_session_private_data(sess);
3156 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3159 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3162 rte_free(s->cipher_key.data);
3163 rte_free(s->auth_key.data);
3164 memset(s, 0, sizeof(dpaa2_sec_session));
3165 set_sec_session_private_data(sess, NULL);
3166 rte_mempool_put(sess_mp, sess_priv);
3172 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3173 struct rte_crypto_sym_xform *xform,
3174 struct rte_cryptodev_sym_session *sess,
3175 struct rte_mempool *mempool)
3177 void *sess_private_data;
3180 if (rte_mempool_get(mempool, &sess_private_data)) {
3181 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3185 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3187 DPAA2_SEC_ERR("Failed to configure session parameters");
3188 /* Return session to mempool */
3189 rte_mempool_put(mempool, sess_private_data);
3193 set_sym_session_private_data(sess, dev->driver_id,
3199 /** Clear the memory of session so it doesn't leave key material behind */
3201 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3202 struct rte_cryptodev_sym_session *sess)
3204 PMD_INIT_FUNC_TRACE();
3205 uint8_t index = dev->driver_id;
3206 void *sess_priv = get_sym_session_private_data(sess, index);
3207 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3211 rte_free(s->cipher_key.data);
3212 rte_free(s->auth_key.data);
3213 memset(s, 0, sizeof(dpaa2_sec_session));
3214 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3215 set_sym_session_private_data(sess, index, NULL);
3216 rte_mempool_put(sess_mp, sess_priv);
3221 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3222 struct rte_cryptodev_config *config __rte_unused)
3224 PMD_INIT_FUNC_TRACE();
3230 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3232 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3233 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3234 struct dpseci_attr attr;
3235 struct dpaa2_queue *dpaa2_q;
3236 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3237 dev->data->queue_pairs;
3238 struct dpseci_rx_queue_attr rx_attr;
3239 struct dpseci_tx_queue_attr tx_attr;
3242 PMD_INIT_FUNC_TRACE();
3244 memset(&attr, 0, sizeof(struct dpseci_attr));
3246 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3248 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3250 goto get_attr_failure;
3252 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3254 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3255 goto get_attr_failure;
3257 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3258 dpaa2_q = &qp[i]->rx_vq;
3259 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3261 dpaa2_q->fqid = rx_attr.fqid;
3262 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3264 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3265 dpaa2_q = &qp[i]->tx_vq;
3266 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3268 dpaa2_q->fqid = tx_attr.fqid;
3269 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3274 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3279 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3281 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3282 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3285 PMD_INIT_FUNC_TRACE();
3287 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3289 DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3294 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3296 DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3302 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3304 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3305 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3308 PMD_INIT_FUNC_TRACE();
3310 /* Function is reverse of dpaa2_sec_dev_init.
3311 * It does the following:
3312 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3313 * 2. Close the DPSECI device
3314 * 3. Free the allocated resources.
3317 /*Close the device at underlying layer*/
3318 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3320 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3324 /*Free the allocated memory for ethernet private data and dpseci*/
3332 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3333 struct rte_cryptodev_info *info)
3335 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3337 PMD_INIT_FUNC_TRACE();
3339 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3340 info->feature_flags = dev->feature_flags;
3341 info->capabilities = dpaa2_sec_capabilities;
3342 /* No limit of number of sessions */
3343 info->sym.max_nb_sessions = 0;
3344 info->driver_id = cryptodev_driver_id;
3349 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3350 struct rte_cryptodev_stats *stats)
3352 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3353 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3354 struct dpseci_sec_counters counters = {0};
3355 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3356 dev->data->queue_pairs;
3359 PMD_INIT_FUNC_TRACE();
3360 if (stats == NULL) {
3361 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3364 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3365 if (qp[i] == NULL) {
3366 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3370 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3371 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3372 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3373 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3376 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3379 DPAA2_SEC_ERR("Failed to read SEC counters");
3381 DPAA2_SEC_INFO("dpseci hardware stats:"
3382 "\n\tNum of Requests Dequeued = %" PRIu64
3383 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3384 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3385 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3386 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3387 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3388 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3389 counters.dequeued_requests,
3390 counters.ob_enc_requests,
3391 counters.ib_dec_requests,
3392 counters.ob_enc_bytes,
3393 counters.ob_prot_bytes,
3394 counters.ib_dec_bytes,
3395 counters.ib_valid_bytes);
3400 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3403 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3404 (dev->data->queue_pairs);
3406 PMD_INIT_FUNC_TRACE();
3408 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3409 if (qp[i] == NULL) {
3410 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3413 qp[i]->tx_vq.rx_pkts = 0;
3414 qp[i]->tx_vq.tx_pkts = 0;
3415 qp[i]->tx_vq.err_pkts = 0;
3416 qp[i]->rx_vq.rx_pkts = 0;
3417 qp[i]->rx_vq.tx_pkts = 0;
3418 qp[i]->rx_vq.err_pkts = 0;
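/*
 * The per-queue software counters above back the standard cryptodev stats
 * API. A sketch of how an application reads and clears them ("dev_id" is a
 * placeholder):
 *
 *   struct rte_cryptodev_stats stats;
 *   if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
 *       printf("enq %"PRIu64" deq %"PRIu64" errs %"PRIu64"/%"PRIu64"\n",
 *              stats.enqueued_count, stats.dequeued_count,
 *              stats.enqueue_err_count, stats.dequeue_err_count);
 *   rte_cryptodev_stats_reset(dev_id);
 */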
3422 static void __attribute__((hot))
3423 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3424 const struct qbman_fd *fd,
3425 const struct qbman_result *dq,
3426 struct dpaa2_queue *rxq,
3427 struct rte_event *ev)
3429 /* Prefetching mbuf */
3430 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3431 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3433 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3434 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3436 ev->flow_id = rxq->ev.flow_id;
3437 ev->sub_event_type = rxq->ev.sub_event_type;
3438 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3439 ev->op = RTE_EVENT_OP_NEW;
3440 ev->sched_type = rxq->ev.sched_type;
3441 ev->queue_id = rxq->ev.queue_id;
3442 ev->priority = rxq->ev.priority;
3443 ev->event_ptr = sec_fd_to_mbuf(fd);
3445 qbman_swp_dqrr_consume(swp, dq);
3448 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3449 const struct qbman_fd *fd,
3450 const struct qbman_result *dq,
3451 struct dpaa2_queue *rxq,
3452 struct rte_event *ev)
3455 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3456 /* Prefetching mbuf */
3457 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3458 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3460 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3461 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3463 ev->flow_id = rxq->ev.flow_id;
3464 ev->sub_event_type = rxq->ev.sub_event_type;
3465 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3466 ev->op = RTE_EVENT_OP_NEW;
3467 ev->sched_type = rxq->ev.sched_type;
3468 ev->queue_id = rxq->ev.queue_id;
3469 ev->priority = rxq->ev.priority;
3471 ev->event_ptr = sec_fd_to_mbuf(fd);
3472 dqrr_index = qbman_get_dqrr_idx(dq);
3473 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3474 DPAA2_PER_LCORE_DQRR_SIZE++;
3475 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3476 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
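/*
 * For atomic scheduling the DQRR entry is not consumed here: its index (+1)
 * is parked in the mbuf's seqn, and the per-lcore DQRR_HELD bitmask and
 * DQRR_MBUF table are updated so the entry can be consumed later, once the
 * application has finished with the op, preserving per-flow ordering.
 * Contrast this with the parallel path above, which consumes the DQRR entry
 * immediately.
 */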
3480 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3482 struct dpaa2_dpcon_dev *dpcon,
3483 const struct rte_event *event)
3485 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3486 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3487 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3488 struct dpseci_rx_queue_cfg cfg;
3492 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3493 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3494 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3495 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3499 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3500 (dpcon->num_priorities - 1);
3502 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3503 cfg.options = DPSECI_QUEUE_OPT_DEST;
3504 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3505 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3506 cfg.dest_cfg.priority = priority;
3508 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3509 cfg.user_ctx = (size_t)(qp);
3510 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3511 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3512 cfg.order_preservation_en = 1;
3514 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3517 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3521 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3527 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3530 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3531 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3532 struct dpseci_rx_queue_cfg cfg;
3535 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3536 cfg.options = DPSECI_QUEUE_OPT_DEST;
3537 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3539 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3542 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3547 static struct rte_cryptodev_ops crypto_ops = {
3548 .dev_configure = dpaa2_sec_dev_configure,
3549 .dev_start = dpaa2_sec_dev_start,
3550 .dev_stop = dpaa2_sec_dev_stop,
3551 .dev_close = dpaa2_sec_dev_close,
3552 .dev_infos_get = dpaa2_sec_dev_infos_get,
3553 .stats_get = dpaa2_sec_stats_get,
3554 .stats_reset = dpaa2_sec_stats_reset,
3555 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3556 .queue_pair_release = dpaa2_sec_queue_pair_release,
3557 .queue_pair_count = dpaa2_sec_queue_pair_count,
3558 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3559 .sym_session_configure = dpaa2_sec_sym_session_configure,
3560 .sym_session_clear = dpaa2_sec_sym_session_clear,
3563 static const struct rte_security_capability *
3564 dpaa2_sec_capabilities_get(void *device __rte_unused)
3566 return dpaa2_sec_security_cap;
3569 static const struct rte_security_ops dpaa2_sec_security_ops = {
3570 .session_create = dpaa2_sec_security_session_create,
3571 .session_update = NULL,
3572 .session_stats_get = NULL,
3573 .session_destroy = dpaa2_sec_security_session_destroy,
3574 .set_pkt_metadata = NULL,
3575 .capabilities_get = dpaa2_sec_capabilities_get
3579 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3581 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3583 rte_free(dev->security_ctx);
3585 rte_mempool_free(internals->fle_pool);
3587 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3588 dev->data->name, rte_socket_id());
3594 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3596 struct dpaa2_sec_dev_private *internals;
3597 struct rte_device *dev = cryptodev->device;
3598 struct rte_dpaa2_device *dpaa2_dev;
3599 struct rte_security_ctx *security_instance;
3600 struct fsl_mc_io *dpseci;
3602 struct dpseci_attr attr;
3606 PMD_INIT_FUNC_TRACE();
3607 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3608 if (dpaa2_dev == NULL) {
3609 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3612 hw_id = dpaa2_dev->object_id;
3614 cryptodev->driver_id = cryptodev_driver_id;
3615 cryptodev->dev_ops = &crypto_ops;
3617 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3618 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3619 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3620 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3621 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3622 RTE_CRYPTODEV_FF_SECURITY |
3623 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3624 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3625 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3626 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3627 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3629 internals = cryptodev->data->dev_private;
3632 * For secondary processes, we don't initialise any further as primary
3633 * has already done this work. Only check we don't need a different
3636 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3637 DPAA2_SEC_DEBUG("Device already initialised by primary process");
3641 /* Initialize security_ctx only for primary process*/
3642 security_instance = rte_malloc("rte_security_instances_ops",
3643 sizeof(struct rte_security_ctx), 0);
3644 if (security_instance == NULL)
3646 security_instance->device = (void *)cryptodev;
3647 security_instance->ops = &dpaa2_sec_security_ops;
3648 security_instance->sess_cnt = 0;
3649 cryptodev->security_ctx = security_instance;
3651 /*Open the rte device via MC and save the handle for further use*/
3652 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3653 sizeof(struct fsl_mc_io), 0);
3656 "Error in allocating the memory for dpsec object");
3659 dpseci->regs = rte_mcp_ptr_list[0];
3661 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3663 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3667 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3670 "Cannot get dpsec device attributed: Error = %x",
3674 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3677 internals->max_nb_queue_pairs = attr.num_tx_queues;
3678 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3679 internals->hw = dpseci;
3680 internals->token = token;
3682 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3683 getpid(), cryptodev->data->dev_id);
3684 internals->fle_pool = rte_mempool_create((const char *)str,
3687 FLE_POOL_CACHE_SIZE, 0,
3688 NULL, NULL, NULL, NULL,
3690 if (!internals->fle_pool) {
3691 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3695 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3699 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3701 /* dpaa2_sec_uninit(crypto_dev_name); */
3706 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3707 struct rte_dpaa2_device *dpaa2_dev)
3709 struct rte_cryptodev *cryptodev;
3710 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3714 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3715 dpaa2_dev->object_id);
3717 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3718 if (cryptodev == NULL)
3721 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3722 cryptodev->data->dev_private = rte_zmalloc_socket(
3723 "cryptodev private structure",
3724 sizeof(struct dpaa2_sec_dev_private),
3725 RTE_CACHE_LINE_SIZE,
3728 if (cryptodev->data->dev_private == NULL)
3729 rte_panic("Cannot allocate memzone for private "
3733 dpaa2_dev->cryptodev = cryptodev;
3734 cryptodev->device = &dpaa2_dev->device;
3736 /* init user callbacks */
3737 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3739 if (dpaa2_svr_family == SVR_LX2160A)
3740 rta_set_sec_era(RTA_SEC_ERA_10);
3742 DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
3744 /* Invoke PMD device initialization function */
3745 retval = dpaa2_sec_dev_init(cryptodev);
3749 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3750 rte_free(cryptodev->data->dev_private);
3752 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3758 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3760 struct rte_cryptodev *cryptodev;
3763 cryptodev = dpaa2_dev->cryptodev;
3764 if (cryptodev == NULL)
3767 ret = dpaa2_sec_uninit(cryptodev);
3771 return rte_cryptodev_pmd_destroy(cryptodev);
3774 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3775 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3776 .drv_type = DPAA2_CRYPTO,
3778 .name = "DPAA2 SEC PMD"
3780 .probe = cryptodev_dpaa2_sec_probe,
3781 .remove = cryptodev_dpaa2_sec_remove,
3784 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3786 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3787 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3788 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3790 RTE_INIT(dpaa2_sec_init_log)
3792 /* Crypto PMD logs */
3793 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3794 if (dpaa2_logtype_sec >= 0)
3795 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);