/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
41 #define FSL_VENDOR_ID 0x1957
42 #define FSL_DEVICE_ID 0x410
43 #define FSL_SUBSYSTEM_SEC 1
44 #define FSL_MC_DPSECI_DEVID 3
47 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
48 #define FLE_POOL_NUM_BUFS 32000
49 #define FLE_POOL_BUF_SIZE 256
50 #define FLE_POOL_CACHE_SIZE 512
51 #define FLE_SG_MEM_SIZE 2048
52 #define SEC_FLC_DHR_OUTBOUND -114
53 #define SEC_FLC_DHR_INBOUND 0
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* Save physical address of mbuf so it can be restored on dequeue */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
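
/*
 * Sketch (hypothetical helper, not part of the driver) of the inverse of
 * the stashing done in build_proto_fd() above: on dequeue the op pointer
 * is read back out of buf_iova and the saved IOVA is restored. The real
 * driver does this inline in sec_simple_fd_to_mbuf() later in this file.
 */
static inline struct rte_crypto_op *
example_restore_op_from_mbuf(struct rte_mbuf *mbuf)
{
	/* buf_iova was overwritten with the op pointer at enqueue time */
	struct rte_crypto_op *op =
		(struct rte_crypto_op *)(size_t)mbuf->buf_iova;

	/* put the real bus address back before the mbuf is reused */
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0;
	return op;
}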
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
			     auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
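
/*
 * Memory layout built by the SG path above (one rte_malloc'd area of
 * FLE_SG_MEM_SIZE bytes; indices follow the op_fle/ip_fle/sge assignments):
 *
 *	fle[0]  - bookkeeping only: op pointer and session ctxt, never
 *		  handed to hardware (the "go back 1 FLE" entry)
 *	fle[1]  - output frame-list entry (op_fle)
 *	fle[2]  - input frame-list entry (ip_fle)
 *	fle[3+] - output SGEs, then input SGEs (IV, AAD, data, ICV)
 *
 * The FD itself carries the IOVA of op_fle plus the COMPOUND format bit.
 */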
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD-ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
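
/*
 * In the authenc paths, auth_only_len is the span that is authenticated
 * but not encrypted. Worked example (illustrative values): with
 * auth.data = {offset 0, length 64} covering an 8-byte header plus
 * payload, and cipher.data = {offset 8, length 56} covering only the
 * payload, auth_only_len = 64 - 56 = 8 header bytes, which SEC learns
 * about via the INTERNAL_JD bits set on the FLE and FD.
 */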
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD-ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD-ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD-ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
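
/*
 * Capacity note for the non-SG paths above: FLE_POOL_BUF_SIZE is 256
 * bytes, i.e. 8 entries assuming the usual 32-byte struct qbman_fle.
 * That covers the bookkeeping FLE, the two frame-list entries, a handful
 * of SGEs, and leaves room for the old-ICV/digest scratch copied in at
 * (sge + 1) on the verification/decrypt side.
 */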
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}
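
/*
 * Note: DPAA2_SEC_HASH_CIPHER (authenticate-then-encrypt) falls through
 * to the error case in both switches above; only the cipher-then-hash
 * order has a shared-descriptor builder, as the "Hash before cipher not
 * supported" path in dpaa2_sec_aead_chain_init() below makes explicit.
 */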
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							   &fd_arr[loop],
							   NULL,
							   frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
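
/*
 * Illustrative application-side usage of the burst API this PMD
 * implements (a sketch; device id, queue pair 0 and a prepared ops[]
 * array are assumptions, not part of the driver):
 */
static inline uint16_t
example_process_burst(uint8_t dev_id, struct rte_crypto_op **ops,
		      uint16_t nb_ops)
{
	uint16_t sent, got = 0;

	/* reaches dpaa2_sec_enqueue_burst() through the ops table */
	sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
	/* poll until everything submitted has come back */
	while (got < sent)
		got += rte_cryptodev_dequeue_burst(dev_id, 0,
						   &ops[got], sent - got);
	return got;
}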
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
		op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}
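
/*
 * Note on the data_off adjustment above: SEC_FLC_DHR_OUTBOUND (-114) and
 * SEC_FLC_DHR_INBOUND (0) correspond to the Data Head Room programmed
 * into the flow context, so on protocol-offload egress the headroom is
 * moved back to where SEC prepended the tunnel headers.
 */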
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD-ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetching mbuf */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else
		rte_free((void *)(fle - 1));

	return op;
}
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
		(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}
/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
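
/*
 * Sketch of a cipher-only transform accepted by the init path above
 * (illustrative values; key and IV_OFFSET are assumptions -- IV_OFFSET is
 * conventionally placed just past the op, i.e.
 * sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op)):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */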
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (size_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
			      aead_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);
	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
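
/*
 * Sketch of an AES-GCM transform accepted by the init path above
 * (illustrative values: 16-byte key, 12-byte IV, 16-byte tag, 8-byte AAD;
 * key and IV_OFFSET are assumptions):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.next = NULL,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */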
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);
	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA2_SEC_ERR("Invalid session struct");
		return -1;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	return 0;
}
static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
			    struct rte_security_session_conf *conf,
			    void *sess)
{
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
	struct ctxt_priv *priv;
	struct ipsec_encap_pdb encap_pdb;
	struct ipsec_decap_pdb decap_pdb;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
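
	/* The security session supplies the crypto transforms in processing
	 * order: cipher first for egress (encrypt, then authenticate), auth
	 * first for ingress.
	 */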
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
				sizeof(struct ctxt_priv) +
				sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);

	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	flc = &priv->flc_desc[0].flc;

	session->ctxt_type = DPAA2_SEC_IPSEC;
	session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
			auth_xform->key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_PCL_IPSEC_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto out;
	}
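
	/* For egress, a template outer IPv4 header is built here and carried
	 * inline in the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL), letting SEC
	 * prepend the tunnel header to each packet on its own.
	 */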
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		struct ip ip4_hdr;

		flc->dhr = SEC_FLC_DHR_OUTBOUND;
		ip4_hdr.ip_v = IPVERSION;
		ip4_hdr.ip_hl = 5;
		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		ip4_hdr.ip_id = 0;
		ip4_hdr.ip_off = 0;
		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
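		/* Payload following the outer header is ESP (IP proto 0x32) */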
		ip4_hdr.ip_p = 0x32;
		ip4_hdr.ip_sum = 0;
		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
			sizeof(struct ip));

		/* For Sec Proto only one descriptor is required. */
		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		encap_pdb.spi = ipsec_xform->spi;
		encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
				1, 0, &encap_pdb,
				(uint8_t *)&ip4_hdr,
				&cipherdata, &authdata);
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		flc->dhr = SEC_FLC_DHR_INBOUND;
		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
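		/* The upper half of the decap options word holds the length
		 * of the outer IP header that SEC must strip on decap.
		 */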
		decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
				1, 0, &decap_pdb, &cipherdata, &authdata);
	} else
		goto out;
	flc->word1_sdl = (uint8_t)bufsize;

	/* Enable the stashing control bit */
	DPAA2_SET_FLC_RSC(flc);
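	/* The 0x14 ORed into the low bits of the queue address below is
	 * understood to encode the stashing attributes that pair with the
	 * RSC bit set above.
	 */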
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq) | 0x14);
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));

	/* Set EWS bit i.e. enable write-safe */
	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1 i.e. reuse input buffers as output buffers */
	DPAA2_SET_FLC_REUSE_BS(flc);
	/* Set FF = 10; reuse input buffers if they provide sufficient space */
	DPAA2_SET_FLC_REUSE_FF(flc);

	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_security_session_create(void *dev,
				  struct rte_security_session_conf *conf,
				  struct rte_security_session *sess,
				  struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa2_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}

static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
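	/* Cache the frame queue IDs reported by the MC firmware in each
	 * configured queue pair; the enqueue/dequeue fast path addresses
	 * these FQIDs directly through QBMAN.
	 */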
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}

static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
			      priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %0x", ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/* Close the device at the underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
		return -1;
	}

	/* Free the memory allocated for the dpseci object */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}

static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}

static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
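	/* Aggregate the software per-queue-pair counters; the hardware SEC
	 * counters fetched below are only logged, not folded into stats.
	 */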
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
				"\n\tNum of Requests Dequeued = %" PRIu64
				"\n\tNum of Outbound Encrypt Requests = %" PRIu64
				"\n\tNum of Inbound Decrypt Requests = %" PRIu64
				"\n\tNum of Outbound Bytes Encrypted = %" PRIu64
				"\n\tNum of Outbound Bytes Protected = %" PRIu64
				"\n\tNum of Inbound Bytes Decrypted = %" PRIu64
				"\n\tNum of Inbound Bytes Validated = %" PRIu64,
				counters.dequeued_requests,
				counters.ob_enc_requests,
				counters.ib_dec_requests,
				counters.ob_enc_bytes,
				counters.ob_prot_bytes,
				counters.ib_dec_bytes,
				counters.ib_valid_bytes);
	}
}

static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_start = dpaa2_sec_queue_pair_start,
	.queue_pair_stop = dpaa2_sec_queue_pair_stop,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.session_get_size = dpaa2_sec_session_get_size,
	.session_configure = dpaa2_sec_session_configure,
	.session_clear = dpaa2_sec_session_clear,
};

static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_security_ctx *security_instance;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		DPAA2_SEC_ERR("DPAA2 SEC device not found");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributes: Error = %x",
			retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;
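
	/* Per-device pool of frame list entries: the enqueue path draws an
	 * FLE buffer from this pool for every crypto op to build the
	 * compound frame descriptor handed to SEC.
	 */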
	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -1;
}

static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
		cryptodev_driver_id);

RTE_INIT(dpaa2_sec_init_log);
static void
dpaa2_sec_init_log(void)
{
	/* Crypto PMD level logs */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}