/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)

#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;
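
/*
 * Build a simple frame descriptor (FD) for an IPsec protocol-offload
 * operation: the FD points straight at the packet mbuf, and the mbuf's
 * buf_iova field is borrowed to stash the crypto op pointer until dequeue
 * (restored in sec_simple_fd_to_mbuf()).
 */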
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* Save the physical address of the mbuf before it is overwritten
	 * with the crypto op pointer; it is restored at dequeue time.
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
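
/*
 * Build a compound FD with scatter/gather frame-list entries (FLEs) for an
 * AEAD (AES-GCM) operation on a segmented mbuf chain.
 */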
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
			struct rte_crypto_op *op,
			struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
			auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
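
/*
 * Build a compound FD for an AEAD (AES-GCM) operation on a contiguous
 * mbuf, with FLEs taken from the per-device FLE mempool.
 */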
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we can go back one FLE from the
	 * FD address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
			   "iv-len=%d data_off: 0x%x\n",
			   sym_op->aead.data.offset,
			   sym_op->aead.data.length,
			   sess->digest_length,
			   sess->iv.length,
			   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
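
/*
 * Build a compound FD for a chained cipher+auth (authenc) operation on a
 * segmented mbuf chain.
 */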
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		    struct rte_crypto_op *op,
		    struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
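
/*
 * Build a compound FD for a chained cipher+auth (authenc) operation on a
 * contiguous mbuf.
 */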
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
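
/*
 * Build a compound FD for an auth-only operation on a segmented mbuf chain.
 */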
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			   DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
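
/*
 * Build a compound FD for an auth-only operation on a contiguous mbuf.
 */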
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
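
/*
 * Build a compound FD for a cipher-only operation on a segmented mbuf chain.
 */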
static inline int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		   struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
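
/*
 * Build a compound FD for a cipher-only operation on a contiguous mbuf.
 */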
static inline int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
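
/*
 * Dispatch FD construction to the appropriate builder for the session type,
 * using the scatter/gather variants for non-contiguous mbufs.
 */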
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}
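
/*
 * PMD enqueue_burst callback: build one FD per crypto op and push them to
 * the DPSECI TX queue through the QBMAN software portal.
 */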
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							   &fd_arr[loop],
							   NULL,
							   frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
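
/*
 * Recover the crypto op from a simple (protocol-offload) FD; the op pointer
 * was stashed in the mbuf's buf_iova field by build_proto_fd().
 */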
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Restore the buf_iova that was stashed away at enqueue time */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
			op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}
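
/*
 * Convert a dequeued FD back into its crypto op. For compound FDs the op
 * address is read from the FLE just before the FD address, after which the
 * FLE memory is returned to its pool.
 */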
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so while retrieving we can go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else {
		dst = src;
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
	} else {
		rte_free((void *)(fle - 1));
	}

	return op;
}
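
/*
 * PMD dequeue_burst callback: issue a volatile dequeue (pull) command on
 * the DPSECI RX queue and convert each returned FD back into a crypto op.
 */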
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
		(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
			dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
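
/*
 * Configure a cipher-only session: copy the key, select the CAAM algorithm,
 * and build the block-cipher shared descriptor in the flow context.
 */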
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
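
/*
 * Configure an auth-only session: copy the key, select the HMAC algorithm,
 * and build the HMAC shared descriptor.
 */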
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
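
/*
 * Configure an AEAD (AES-GCM) session; rta_inline_query() decides whether
 * the key is placed inline in the descriptor or referenced by pointer.
 */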
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (size_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
			      aead_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);
	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
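
/*
 * Configure a chained cipher+auth session (CIPHER_HASH/HASH_CIPHER) and
 * build a single authenc shared descriptor covering both xforms.
 */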
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);
	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
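
/*
 * Top-level symmetric session configuration: dispatch on the shape of the
 * xform chain to the matching init routine above.
 */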
2098 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2099 struct rte_crypto_sym_xform *xform, void *sess)
2101 dpaa2_sec_session *session = sess;
2103 PMD_INIT_FUNC_TRACE();
2105 if (unlikely(sess == NULL)) {
2106 DPAA2_SEC_ERR("Invalid session struct");
	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM-type algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	return 0;
}
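
/*
 * Build a lookaside IPsec (rte_security) session. Unlike the plain crypto
 * sessions above, the shared descriptor constructed here performs the whole
 * ESP transform (tunnel header handling, padding, ICV generation/checking)
 * in hardware, so the application submits complete packets rather than
 * cipher/auth offsets.
 */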
static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
			    struct rte_security_session_conf *conf,
			    void *sess)
{
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_auth_xform *auth_xform;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
	struct ctxt_priv *priv;
	struct ipsec_encap_pdb encap_pdb;
	struct ipsec_decap_pdb decap_pdb;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		cipher_xform = &conf->crypto_xform->cipher;
		auth_xform = &conf->crypto_xform->next->auth;
	} else {
		auth_xform = &conf->crypto_xform->auth;
		cipher_xform = &conf->crypto_xform->next->cipher;
	}
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
				sizeof(struct ctxt_priv) +
				sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	flc = &priv->flc_desc[0].flc;
	session->ctxt_type = DPAA2_SEC_IPSEC;
	session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL &&
			cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -ENOMEM;
	}

	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL,
					auth_xform->key.length,
					RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL &&
			auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -ENOMEM;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
			auth_xform->key.length);

	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto out;
	}
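
	/*
	 * Note: the IPsec protocol descriptor consumes OP_PCL_IPSEC_*
	 * protocol selectors (above and below), rather than the raw
	 * OP_ALG_ALGSEL_* class codes used by the plain crypto session paths.
	 */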
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_PCL_IPSEC_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto out;
	}
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		struct ip ip4_hdr;

		flc->dhr = SEC_FLC_DHR_OUTBOUND;
		/* Prebuild the outer tunnel IPv4 header; SEC prepends it. */
		ip4_hdr.ip_v = IPVERSION;
		ip4_hdr.ip_hl = 5;
		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		ip4_hdr.ip_id = 0;
		ip4_hdr.ip_off = 0;
		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		ip4_hdr.ip_p = 0x32;	/* IPPROTO_ESP */
		ip4_hdr.ip_sum = 0;
		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
					     sizeof(struct ip));

		/* For Sec Proto only one descriptor is required. */
		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL;
		encap_pdb.spi = ipsec_xform->spi;
		encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
				1, 0, &encap_pdb,
				(uint8_t *)&ip4_hdr,
				&cipherdata, &authdata);
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		flc->dhr = SEC_FLC_DHR_INBOUND;
		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		/* Upper 16 bits of options carry the outer header length. */
		decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
				1, 0, &decap_pdb, &cipherdata, &authdata);
	} else {
		goto out;
	}
	flc->word1_sdl = (uint8_t)bufsize;
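
	/*
	 * word1 holds the shared descriptor length (SDL); words 2/3, set
	 * below, point the SEC block at the default RX queue for responses.
	 * The low bits OR'ed into that address (0x14 here) carry stashing
	 * control settings, which take effect with the RSC bit enabled below.
	 */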
	/* Enable the stashing control bit */
	DPAA2_SET_FLC_RSC(flc);
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq) | 0x14);
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));

	/* Set EWS bit, i.e. enable write-safe */
	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
	DPAA2_SET_FLC_REUSE_BS(flc);
	/* Set FF = 10: reuse input buffers if they provide sufficient space */
	DPAA2_SET_FLC_REUSE_FF(flc);

	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
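
/*
 * rte_security entry point for session creation. An application typically
 * reaches this through the generic API, roughly (illustrative sketch only):
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mempool);
 */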
static int
dpaa2_sec_security_session_create(void *dev,
				  struct rte_security_session_conf *conf,
				  struct rte_security_session *sess,
				  struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		rte_mempool_put(mempool, sess_private_data);
		return -ENOTSUP;
	default:
		rte_mempool_put(mempool, sess_private_data);
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa2_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		/* Zero the private data, not the generic session handle */
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
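
/*
 * Plain (non-security) symmetric session path: the private session object
 * comes from the caller-provided mempool and is parked in the generic
 * rte_cryptodev_sym_session under this driver's id.
 */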
static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		/* Zero the private data, not the generic session handle */
		memset(s, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
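
/*
 * Starting the device enables the DPSECI object through the MC and then
 * queries the frame queue IDs (FQIDs) that firmware assigned to each
 * configured queue pair; the data path needs these for enqueue/dequeue.
 */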
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
			      priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		DPAA2_SEC_ERR("SEC Device cannot be reset: Error = %0x", ret);
		return;
	}
}
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/* Close the device at the underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
		return -1;
	}

	/* Free the allocated memory for the dpseci object */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static void
dpaa2_sec_stats_get(struct rte_cryptodev *dev,
		    struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	/* Accumulate software counters across all queue pairs. */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
			    "\n\tNum of Requests Dequeued = %" PRIu64
			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
static void
dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
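
/* Cryptodev ops table wiring the PMD into the generic rte_cryptodev API */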
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_start = dpaa2_sec_queue_pair_start,
	.queue_pair_stop = dpaa2_sec_queue_pair_stop,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.session_get_size = dpaa2_sec_session_get_size,
	.session_configure = dpaa2_sec_session_configure,
	.session_clear = dpaa2_sec_session_clear,
};
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}

struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}
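
/*
 * Per-device initialization, called at probe time: bind the burst ops,
 * publish feature flags, create the rte_security context (primary process
 * only), open the DPSECI object via the MC portal, and carve out the FLE
 * pool used by the data path to build frame-list entries.
 */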
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_security_ctx *security_instance;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		DPAA2_SEC_ERR("DPAA2 SEC device not found");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -ENOMEM;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributes: Error = %x",
			retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}
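
/*
 * Bus probe hook: allocate the generic cryptodev shell (private data only in
 * the primary process) and hand it to dpaa2_sec_dev_init; on failure the
 * device is rolled back and marked detached.
 */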
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
		cryptodev_driver_id);
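
/*
 * The registration macros above hook the driver into the fslmc bus scan and
 * allocate the cryptodev driver id under which sessions are indexed.
 */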
RTE_INIT(dpaa2_sec_init_log);
static void
dpaa2_sec_init_log(void)
{
	/* Register the crypto PMD log type; default level is NOTICE */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}