1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2018 NXP
12 #include <rte_cryptodev.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
19 #include <rte_cryptodev_pmd.h>
20 #include <rte_common.h>
21 #include <rte_fslmc.h>
22 #include <fslmc_vfio.h>
23 #include <dpaa2_hw_pvt.h>
24 #include <dpaa2_hw_dpio.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <fsl_dpopr.h>
27 #include <fsl_dpseci.h>
28 #include <fsl_mc_sys.h>
30 #include "dpaa2_sec_priv.h"
31 #include "dpaa2_sec_event.h"
32 #include "dpaa2_sec_logs.h"
35 typedef uint64_t dma_addr_t;
37 /* RTA header files */
38 #include <hw/desc/ipsec.h>
39 #include <hw/desc/pdcp.h>
40 #include <hw/desc/algo.h>
42 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
43 * a pointer to the shared descriptor
45 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
46 #define FSL_VENDOR_ID 0x1957
47 #define FSL_DEVICE_ID 0x410
48 #define FSL_SUBSYSTEM_SEC 1
49 #define FSL_MC_DPSECI_DEVID 3
52 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
53 #define FLE_POOL_NUM_BUFS 32000
54 #define FLE_POOL_BUF_SIZE 256
55 #define FLE_POOL_CACHE_SIZE 512
56 #define FLE_SG_MEM_SIZE 2048
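/* Data head-room adjustment applied to the mbuf data offset after
 * protocol (IPsec) processing; see sec_simple_fd_to_mbuf().
 */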
57 #define SEC_FLC_DHR_OUTBOUND -114
58 #define SEC_FLC_DHR_INBOUND 0
60 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
62 static uint8_t cryptodev_driver_id;
64 int dpaa2_logtype_sec;
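/* Build a compound (frame-list) FD for protocol offload sessions
 * (IPsec/PDCP): the output FLE points at the destination mbuf and the
 * input FLE at the source mbuf.
 */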
67 build_proto_compound_fd(dpaa2_sec_session *sess,
68 struct rte_crypto_op *op,
69 struct qbman_fd *fd, uint16_t bpid)
71 struct rte_crypto_sym_op *sym_op = op->sym;
72 struct ctxt_priv *priv = sess->ctxt;
73 struct qbman_fle *fle, *ip_fle, *op_fle;
74 struct sec_flow_context *flc;
75 struct rte_mbuf *src_mbuf = sym_op->m_src;
76 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
82 /* Save the shared descriptor */
83 flc = &priv->flc_desc[0].flc;
85 /* We use the first FLE entry to store the mbuf */
86 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
88 DPAA2_SEC_ERR("Memory alloc failed");
91 memset(fle, 0, FLE_POOL_BUF_SIZE);
92 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
93 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
98 if (likely(bpid < MAX_BPID)) {
99 DPAA2_SET_FD_BPID(fd, bpid);
100 DPAA2_SET_FLE_BPID(op_fle, bpid);
101 DPAA2_SET_FLE_BPID(ip_fle, bpid);
103 DPAA2_SET_FD_IVP(fd);
104 DPAA2_SET_FLE_IVP(op_fle);
105 DPAA2_SET_FLE_IVP(ip_fle);
108 /* Configure FD as a FRAME LIST */
109 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
110 DPAA2_SET_FD_COMPOUND_FMT(fd);
111 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
113 /* Configure Output FLE with dst mbuf data */
114 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
115 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
116 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
118 /* Configure Input FLE with src mbuf data */
119 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
120 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
121 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
123 DPAA2_SET_FD_LEN(fd, ip_fle->length);
124 DPAA2_SET_FLE_FIN(ip_fle);
126 #ifdef ENABLE_HFN_OVERRIDE
127 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
128 /* Enable HFN override */
129 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
130 DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
131 DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
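/* Build an FD for IPsec protocol offload. In the simple (single-buffer)
 * case the crypto op pointer is stashed in the mbuf buf_iova so it can
 * be recovered on dequeue.
 */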
140 build_proto_fd(dpaa2_sec_session *sess,
141 struct rte_crypto_op *op,
142 struct qbman_fd *fd, uint16_t bpid)
144 struct rte_crypto_sym_op *sym_op = op->sym;
146 return build_proto_compound_fd(sess, op, fd, bpid);
148 struct ctxt_priv *priv = sess->ctxt;
149 struct sec_flow_context *flc;
150 struct rte_mbuf *mbuf = sym_op->m_src;
152 if (likely(bpid < MAX_BPID))
153 DPAA2_SET_FD_BPID(fd, bpid);
155 DPAA2_SET_FD_IVP(fd);
157 /* Save the shared descriptor */
158 flc = &priv->flc_desc[0].flc;
160 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
161 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
162 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
163 DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
165 /* save physical address of mbuf */
166 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
167 mbuf->buf_iova = (size_t)op;
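/* Build a compound FD with scatter-gather FLEs for AEAD (AES-GCM)
 * operations on segmented mbufs.
 */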
173 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
174 struct rte_crypto_op *op,
175 struct qbman_fd *fd, __rte_unused uint16_t bpid)
177 struct rte_crypto_sym_op *sym_op = op->sym;
178 struct ctxt_priv *priv = sess->ctxt;
179 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
180 struct sec_flow_context *flc;
181 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
182 int icv_len = sess->digest_length;
184 struct rte_mbuf *mbuf;
185 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
188 PMD_INIT_FUNC_TRACE();
191 mbuf = sym_op->m_dst;
193 mbuf = sym_op->m_src;
195 /* first FLE entry used to store mbuf and session ctxt */
196 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
197 RTE_CACHE_LINE_SIZE);
198 if (unlikely(!fle)) {
199 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
202 memset(fle, 0, FLE_SG_MEM_SIZE);
203 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
204 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
210 /* Save the shared descriptor */
211 flc = &priv->flc_desc[0].flc;
213 /* Configure FD as a FRAME LIST */
214 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
215 DPAA2_SET_FD_COMPOUND_FMT(fd);
216 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
218 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
219 "iv-len=%d data_off: 0x%x\n",
220 sym_op->aead.data.offset,
221 sym_op->aead.data.length,
224 sym_op->m_src->data_off);
226 /* Configure Output FLE with Scatter/Gather Entry */
227 DPAA2_SET_FLE_SG_EXT(op_fle);
228 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
231 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
233 op_fle->length = (sess->dir == DIR_ENC) ?
234 (sym_op->aead.data.length + icv_len + auth_only_len) :
235 sym_op->aead.data.length + auth_only_len;
237 /* Configure Output SGE for Encap/Decap */
238 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
239 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
241 sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
247 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
248 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
249 sge->length = mbuf->data_len;
252 sge->length -= icv_len;
254 if (sess->dir == DIR_ENC) {
256 DPAA2_SET_FLE_ADDR(sge,
257 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
258 sge->length = icv_len;
260 DPAA2_SET_FLE_FIN(sge);
263 mbuf = sym_op->m_src;
265 /* Configure Input FLE with Scatter/Gather Entry */
266 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
267 DPAA2_SET_FLE_SG_EXT(ip_fle);
268 DPAA2_SET_FLE_FIN(ip_fle);
269 ip_fle->length = (sess->dir == DIR_ENC) ?
270 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
271 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
274 /* Configure Input SGE for Encap/Decap */
275 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
276 sge->length = sess->iv.length;
280 DPAA2_SET_FLE_ADDR(sge,
281 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
282 sge->length = auth_only_len;
286 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
287 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
289 sge->length = mbuf->data_len - sym_op->aead.data.offset;
295 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
296 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
297 sge->length = mbuf->data_len;
301 if (sess->dir == DIR_DEC) {
303 old_icv = (uint8_t *)(sge + 1);
304 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
305 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
306 sge->length = icv_len;
309 DPAA2_SET_FLE_FIN(sge);
311 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
312 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
314 DPAA2_SET_FD_LEN(fd, ip_fle->length);
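/* Build a compound FD for AEAD (AES-GCM) operations on contiguous
 * mbufs.
 */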
320 build_authenc_gcm_fd(dpaa2_sec_session *sess,
321 struct rte_crypto_op *op,
322 struct qbman_fd *fd, uint16_t bpid)
324 struct rte_crypto_sym_op *sym_op = op->sym;
325 struct ctxt_priv *priv = sess->ctxt;
326 struct qbman_fle *fle, *sge;
327 struct sec_flow_context *flc;
328 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
329 int icv_len = sess->digest_length, retval;
331 struct rte_mbuf *dst;
332 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
335 PMD_INIT_FUNC_TRACE();
342 /* TODO: we are using the first FLE entry to store the mbuf and session ctxt.
343 * Currently we do not know which FLE has the mbuf stored.
344 * So while retrieving we can go back one FLE from the FD ADDR
345 * to get the mbuf address from the previous FLE.
346 * A better approach would be to use the inline mbuf.
348 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
350 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
353 memset(fle, 0, FLE_POOL_BUF_SIZE);
354 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
355 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
358 if (likely(bpid < MAX_BPID)) {
359 DPAA2_SET_FD_BPID(fd, bpid);
360 DPAA2_SET_FLE_BPID(fle, bpid);
361 DPAA2_SET_FLE_BPID(fle + 1, bpid);
362 DPAA2_SET_FLE_BPID(sge, bpid);
363 DPAA2_SET_FLE_BPID(sge + 1, bpid);
364 DPAA2_SET_FLE_BPID(sge + 2, bpid);
365 DPAA2_SET_FLE_BPID(sge + 3, bpid);
367 DPAA2_SET_FD_IVP(fd);
368 DPAA2_SET_FLE_IVP(fle);
369 DPAA2_SET_FLE_IVP((fle + 1));
370 DPAA2_SET_FLE_IVP(sge);
371 DPAA2_SET_FLE_IVP((sge + 1));
372 DPAA2_SET_FLE_IVP((sge + 2));
373 DPAA2_SET_FLE_IVP((sge + 3));
376 /* Save the shared descriptor */
377 flc = &priv->flc_desc[0].flc;
378 /* Configure FD as a FRAME LIST */
379 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
380 DPAA2_SET_FD_COMPOUND_FMT(fd);
381 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
383 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
384 "iv-len=%d data_off: 0x%x\n",
385 sym_op->aead.data.offset,
386 sym_op->aead.data.length,
389 sym_op->m_src->data_off);
391 /* Configure Output FLE with Scatter/Gather Entry */
392 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
394 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
395 fle->length = (sess->dir == DIR_ENC) ?
396 (sym_op->aead.data.length + icv_len + auth_only_len) :
397 sym_op->aead.data.length + auth_only_len;
399 DPAA2_SET_FLE_SG_EXT(fle);
401 /* Configure Output SGE for Encap/Decap */
402 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
403 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
404 dst->data_off - auth_only_len);
405 sge->length = sym_op->aead.data.length + auth_only_len;
407 if (sess->dir == DIR_ENC) {
409 DPAA2_SET_FLE_ADDR(sge,
410 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
411 sge->length = sess->digest_length;
412 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
413 sess->iv.length + auth_only_len));
415 DPAA2_SET_FLE_FIN(sge);
420 /* Configure Input FLE with Scatter/Gather Entry */
421 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
422 DPAA2_SET_FLE_SG_EXT(fle);
423 DPAA2_SET_FLE_FIN(fle);
424 fle->length = (sess->dir == DIR_ENC) ?
425 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
426 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
427 sess->digest_length);
429 /* Configure Input SGE for Encap/Decap */
430 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
431 sge->length = sess->iv.length;
434 DPAA2_SET_FLE_ADDR(sge,
435 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
436 sge->length = auth_only_len;
437 DPAA2_SET_FLE_BPID(sge, bpid);
441 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
442 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
443 sym_op->m_src->data_off);
444 sge->length = sym_op->aead.data.length;
445 if (sess->dir == DIR_DEC) {
447 old_icv = (uint8_t *)(sge + 1);
448 memcpy(old_icv, sym_op->aead.digest.data,
449 sess->digest_length);
450 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
451 sge->length = sess->digest_length;
452 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
453 sess->digest_length +
457 DPAA2_SET_FLE_FIN(sge);
460 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
461 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
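/* Build a compound FD with scatter-gather FLEs for chained cipher+auth
 * (authenc) operations on segmented mbufs.
 */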
468 build_authenc_sg_fd(dpaa2_sec_session *sess,
469 struct rte_crypto_op *op,
470 struct qbman_fd *fd, __rte_unused uint16_t bpid)
472 struct rte_crypto_sym_op *sym_op = op->sym;
473 struct ctxt_priv *priv = sess->ctxt;
474 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
475 struct sec_flow_context *flc;
476 uint32_t auth_only_len = sym_op->auth.data.length -
477 sym_op->cipher.data.length;
478 int icv_len = sess->digest_length;
480 struct rte_mbuf *mbuf;
481 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
484 PMD_INIT_FUNC_TRACE();
487 mbuf = sym_op->m_dst;
489 mbuf = sym_op->m_src;
491 /* first FLE entry used to store mbuf and session ctxt */
492 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
493 RTE_CACHE_LINE_SIZE);
494 if (unlikely(!fle)) {
495 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
498 memset(fle, 0, FLE_SG_MEM_SIZE);
499 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
500 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
506 /* Save the shared descriptor */
507 flc = &priv->flc_desc[0].flc;
509 /* Configure FD as a FRAME LIST */
510 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
511 DPAA2_SET_FD_COMPOUND_FMT(fd);
512 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
515 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
516 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
517 sym_op->auth.data.offset,
518 sym_op->auth.data.length,
520 sym_op->cipher.data.offset,
521 sym_op->cipher.data.length,
523 sym_op->m_src->data_off);
525 /* Configure Output FLE with Scatter/Gather Entry */
526 DPAA2_SET_FLE_SG_EXT(op_fle);
527 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
530 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
532 op_fle->length = (sess->dir == DIR_ENC) ?
533 (sym_op->cipher.data.length + icv_len) :
534 sym_op->cipher.data.length;
536 /* Configure Output SGE for Encap/Decap */
537 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
538 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
539 sge->length = mbuf->data_len - sym_op->auth.data.offset;
545 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
546 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
547 sge->length = mbuf->data_len;
550 sge->length -= icv_len;
552 if (sess->dir == DIR_ENC) {
554 DPAA2_SET_FLE_ADDR(sge,
555 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
556 sge->length = icv_len;
558 DPAA2_SET_FLE_FIN(sge);
561 mbuf = sym_op->m_src;
563 /* Configure Input FLE with Scatter/Gather Entry */
564 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
565 DPAA2_SET_FLE_SG_EXT(ip_fle);
566 DPAA2_SET_FLE_FIN(ip_fle);
567 ip_fle->length = (sess->dir == DIR_ENC) ?
568 (sym_op->auth.data.length + sess->iv.length) :
569 (sym_op->auth.data.length + sess->iv.length +
572 /* Configure Input SGE for Encap/Decap */
573 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
574 sge->length = sess->iv.length;
577 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
578 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
580 sge->length = mbuf->data_len - sym_op->auth.data.offset;
586 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
587 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
588 sge->length = mbuf->data_len;
591 sge->length -= icv_len;
593 if (sess->dir == DIR_DEC) {
595 old_icv = (uint8_t *)(sge + 1);
596 memcpy(old_icv, sym_op->auth.digest.data,
598 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
599 sge->length = icv_len;
602 DPAA2_SET_FLE_FIN(sge);
604 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
605 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
607 DPAA2_SET_FD_LEN(fd, ip_fle->length);
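/* Build a compound FD for chained cipher+auth (authenc) operations on
 * contiguous mbufs.
 */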
613 build_authenc_fd(dpaa2_sec_session *sess,
614 struct rte_crypto_op *op,
615 struct qbman_fd *fd, uint16_t bpid)
617 struct rte_crypto_sym_op *sym_op = op->sym;
618 struct ctxt_priv *priv = sess->ctxt;
619 struct qbman_fle *fle, *sge;
620 struct sec_flow_context *flc;
621 uint32_t auth_only_len = sym_op->auth.data.length -
622 sym_op->cipher.data.length;
623 int icv_len = sess->digest_length, retval;
625 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
627 struct rte_mbuf *dst;
629 PMD_INIT_FUNC_TRACE();
636 /* We are using the first FLE entry to store the mbuf.
637 * Currently we do not know which FLE has the mbuf stored.
638 * So while retrieving we can go back one FLE from the FD ADDR
639 * to get the mbuf address from the previous FLE.
640 * A better approach would be to use the inline mbuf.
642 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
644 DPAA2_SEC_ERR("Memory alloc failed for SGE");
647 memset(fle, 0, FLE_POOL_BUF_SIZE);
648 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
649 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
652 if (likely(bpid < MAX_BPID)) {
653 DPAA2_SET_FD_BPID(fd, bpid);
654 DPAA2_SET_FLE_BPID(fle, bpid);
655 DPAA2_SET_FLE_BPID(fle + 1, bpid);
656 DPAA2_SET_FLE_BPID(sge, bpid);
657 DPAA2_SET_FLE_BPID(sge + 1, bpid);
658 DPAA2_SET_FLE_BPID(sge + 2, bpid);
659 DPAA2_SET_FLE_BPID(sge + 3, bpid);
661 DPAA2_SET_FD_IVP(fd);
662 DPAA2_SET_FLE_IVP(fle);
663 DPAA2_SET_FLE_IVP((fle + 1));
664 DPAA2_SET_FLE_IVP(sge);
665 DPAA2_SET_FLE_IVP((sge + 1));
666 DPAA2_SET_FLE_IVP((sge + 2));
667 DPAA2_SET_FLE_IVP((sge + 3));
670 /* Save the shared descriptor */
671 flc = &priv->flc_desc[0].flc;
672 /* Configure FD as a FRAME LIST */
673 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
674 DPAA2_SET_FD_COMPOUND_FMT(fd);
675 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
678 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
679 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
680 sym_op->auth.data.offset,
681 sym_op->auth.data.length,
683 sym_op->cipher.data.offset,
684 sym_op->cipher.data.length,
686 sym_op->m_src->data_off);
688 /* Configure Output FLE with Scatter/Gather Entry */
689 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
691 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
692 fle->length = (sess->dir == DIR_ENC) ?
693 (sym_op->cipher.data.length + icv_len) :
694 sym_op->cipher.data.length;
696 DPAA2_SET_FLE_SG_EXT(fle);
698 /* Configure Output SGE for Encap/Decap */
699 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
700 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
702 sge->length = sym_op->cipher.data.length;
704 if (sess->dir == DIR_ENC) {
706 DPAA2_SET_FLE_ADDR(sge,
707 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
708 sge->length = sess->digest_length;
709 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
712 DPAA2_SET_FLE_FIN(sge);
717 /* Configure Input FLE with Scatter/Gather Entry */
718 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
719 DPAA2_SET_FLE_SG_EXT(fle);
720 DPAA2_SET_FLE_FIN(fle);
721 fle->length = (sess->dir == DIR_ENC) ?
722 (sym_op->auth.data.length + sess->iv.length) :
723 (sym_op->auth.data.length + sess->iv.length +
724 sess->digest_length);
726 /* Configure Input SGE for Encap/Decap */
727 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
728 sge->length = sess->iv.length;
731 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
732 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
733 sym_op->m_src->data_off);
734 sge->length = sym_op->auth.data.length;
735 if (sess->dir == DIR_DEC) {
737 old_icv = (uint8_t *)(sge + 1);
738 memcpy(old_icv, sym_op->auth.digest.data,
739 sess->digest_length);
740 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
741 sge->length = sess->digest_length;
742 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
743 sess->digest_length +
746 DPAA2_SET_FLE_FIN(sge);
748 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
749 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
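/* Build a compound FD with scatter-gather FLEs for auth-only
 * operations on segmented mbufs.
 */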
754 static inline int build_auth_sg_fd(
755 dpaa2_sec_session *sess,
756 struct rte_crypto_op *op,
758 __rte_unused uint16_t bpid)
760 struct rte_crypto_sym_op *sym_op = op->sym;
761 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
762 struct sec_flow_context *flc;
763 struct ctxt_priv *priv = sess->ctxt;
765 struct rte_mbuf *mbuf;
767 PMD_INIT_FUNC_TRACE();
769 mbuf = sym_op->m_src;
770 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
771 RTE_CACHE_LINE_SIZE);
772 if (unlikely(!fle)) {
773 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
776 memset(fle, 0, FLE_SG_MEM_SIZE);
777 /* first FLE entry used to store mbuf and session ctxt */
778 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
779 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
784 flc = &priv->flc_desc[DESC_INITFINAL].flc;
786 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
787 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
788 DPAA2_SET_FD_COMPOUND_FMT(fd);
791 DPAA2_SET_FLE_ADDR(op_fle,
792 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
793 op_fle->length = sess->digest_length;
796 DPAA2_SET_FLE_SG_EXT(ip_fle);
797 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
799 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
800 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
801 sge->length = mbuf->data_len - sym_op->auth.data.offset;
807 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
808 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
809 sge->length = mbuf->data_len;
812 if (sess->dir == DIR_ENC) {
813 /* Digest calculation case */
814 sge->length -= sess->digest_length;
815 ip_fle->length = sym_op->auth.data.length;
817 /* Digest verification case */
819 old_digest = (uint8_t *)(sge + 1);
820 rte_memcpy(old_digest, sym_op->auth.digest.data,
821 sess->digest_length);
822 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
823 sge->length = sess->digest_length;
824 ip_fle->length = sym_op->auth.data.length +
827 DPAA2_SET_FLE_FIN(sge);
828 DPAA2_SET_FLE_FIN(ip_fle);
829 DPAA2_SET_FD_LEN(fd, ip_fle->length);
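/* Build a compound FD for auth-only operations on contiguous mbufs */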
835 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
836 struct qbman_fd *fd, uint16_t bpid)
838 struct rte_crypto_sym_op *sym_op = op->sym;
839 struct qbman_fle *fle, *sge;
840 struct sec_flow_context *flc;
841 struct ctxt_priv *priv = sess->ctxt;
845 PMD_INIT_FUNC_TRACE();
847 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
849 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
852 memset(fle, 0, FLE_POOL_BUF_SIZE);
853 /* TODO: we are using the first FLE entry to store the mbuf.
854 * Currently we do not know which FLE has the mbuf stored.
855 * So while retrieving we can go back one FLE from the FD ADDR
856 * to get the mbuf address from the previous FLE.
857 * A better approach would be to use the inline mbuf.
859 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
860 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
863 if (likely(bpid < MAX_BPID)) {
864 DPAA2_SET_FD_BPID(fd, bpid);
865 DPAA2_SET_FLE_BPID(fle, bpid);
866 DPAA2_SET_FLE_BPID(fle + 1, bpid);
868 DPAA2_SET_FD_IVP(fd);
869 DPAA2_SET_FLE_IVP(fle);
870 DPAA2_SET_FLE_IVP((fle + 1));
872 flc = &priv->flc_desc[DESC_INITFINAL].flc;
873 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
875 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
876 fle->length = sess->digest_length;
878 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
879 DPAA2_SET_FD_COMPOUND_FMT(fd);
882 if (sess->dir == DIR_ENC) {
883 DPAA2_SET_FLE_ADDR(fle,
884 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
885 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
886 sym_op->m_src->data_off);
887 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
888 fle->length = sym_op->auth.data.length;
891 DPAA2_SET_FLE_SG_EXT(fle);
892 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
894 if (likely(bpid < MAX_BPID)) {
895 DPAA2_SET_FLE_BPID(sge, bpid);
896 DPAA2_SET_FLE_BPID(sge + 1, bpid);
898 DPAA2_SET_FLE_IVP(sge);
899 DPAA2_SET_FLE_IVP((sge + 1));
901 DPAA2_SET_FLE_ADDR(sge,
902 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
903 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
904 sym_op->m_src->data_off);
906 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
907 sess->digest_length);
908 sge->length = sym_op->auth.data.length;
910 old_digest = (uint8_t *)(sge + 1);
911 rte_memcpy(old_digest, sym_op->auth.digest.data,
912 sess->digest_length);
913 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
914 sge->length = sess->digest_length;
915 fle->length = sym_op->auth.data.length +
917 DPAA2_SET_FLE_FIN(sge);
919 DPAA2_SET_FLE_FIN(fle);
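/* Build a compound FD with scatter-gather FLEs for cipher-only
 * operations on segmented mbufs.
 */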
925 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
926 struct qbman_fd *fd, __rte_unused uint16_t bpid)
928 struct rte_crypto_sym_op *sym_op = op->sym;
929 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
930 struct sec_flow_context *flc;
931 struct ctxt_priv *priv = sess->ctxt;
932 struct rte_mbuf *mbuf;
933 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
936 PMD_INIT_FUNC_TRACE();
939 mbuf = sym_op->m_dst;
941 mbuf = sym_op->m_src;
943 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
944 RTE_CACHE_LINE_SIZE);
946 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
949 memset(fle, 0, FLE_SG_MEM_SIZE);
950 /* first FLE entry used to store mbuf and session ctxt */
951 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
952 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
958 flc = &priv->flc_desc[0].flc;
961 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
963 sym_op->cipher.data.offset,
964 sym_op->cipher.data.length,
966 sym_op->m_src->data_off);
969 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
970 op_fle->length = sym_op->cipher.data.length;
971 DPAA2_SET_FLE_SG_EXT(op_fle);
974 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
975 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
976 sge->length = mbuf->data_len - sym_op->cipher.data.offset;
982 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
983 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
984 sge->length = mbuf->data_len;
987 DPAA2_SET_FLE_FIN(sge);
990 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
991 flc, fle, fle->addr_hi, fle->addr_lo,
995 mbuf = sym_op->m_src;
997 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
998 ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
999 DPAA2_SET_FLE_SG_EXT(ip_fle);
1002 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1003 DPAA2_SET_FLE_OFFSET(sge, 0);
1004 sge->length = sess->iv.length;
1009 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1010 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
1012 sge->length = mbuf->data_len - sym_op->cipher.data.offset;
1018 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1019 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1020 sge->length = mbuf->data_len;
1023 DPAA2_SET_FLE_FIN(sge);
1024 DPAA2_SET_FLE_FIN(ip_fle);
1027 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1028 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1029 DPAA2_SET_FD_COMPOUND_FMT(fd);
1030 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1033 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1034 " off =%d, len =%d\n",
1035 DPAA2_GET_FD_ADDR(fd),
1036 DPAA2_GET_FD_BPID(fd),
1037 rte_dpaa2_bpid_info[bpid].meta_data_size,
1038 DPAA2_GET_FD_OFFSET(fd),
1039 DPAA2_GET_FD_LEN(fd));
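/* Build a compound FD for cipher-only operations on contiguous mbufs */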
1044 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1045 struct qbman_fd *fd, uint16_t bpid)
1047 struct rte_crypto_sym_op *sym_op = op->sym;
1048 struct qbman_fle *fle, *sge;
1050 struct sec_flow_context *flc;
1051 struct ctxt_priv *priv = sess->ctxt;
1052 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1054 struct rte_mbuf *dst;
1056 PMD_INIT_FUNC_TRACE();
1059 dst = sym_op->m_dst;
1061 dst = sym_op->m_src;
1063 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1065 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1068 memset(fle, 0, FLE_POOL_BUF_SIZE);
1069 /* TODO: we are using the first FLE entry to store the mbuf.
1070 * Currently we do not know which FLE has the mbuf stored.
1071 * So while retrieving we can go back one FLE from the FD ADDR
1072 * to get the mbuf address from the previous FLE.
1073 * A better approach would be to use the inline mbuf.
1075 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1076 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1080 if (likely(bpid < MAX_BPID)) {
1081 DPAA2_SET_FD_BPID(fd, bpid);
1082 DPAA2_SET_FLE_BPID(fle, bpid);
1083 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1084 DPAA2_SET_FLE_BPID(sge, bpid);
1085 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1087 DPAA2_SET_FD_IVP(fd);
1088 DPAA2_SET_FLE_IVP(fle);
1089 DPAA2_SET_FLE_IVP((fle + 1));
1090 DPAA2_SET_FLE_IVP(sge);
1091 DPAA2_SET_FLE_IVP((sge + 1));
1094 flc = &priv->flc_desc[0].flc;
1095 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1096 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
1098 DPAA2_SET_FD_COMPOUND_FMT(fd);
1099 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1102 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1103 " data_off: 0x%x\n",
1104 sym_op->cipher.data.offset,
1105 sym_op->cipher.data.length,
1107 sym_op->m_src->data_off);
1109 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1110 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
1113 fle->length = sym_op->cipher.data.length + sess->iv.length;
1116 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1117 flc, fle, fle->addr_hi, fle->addr_lo,
1122 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1123 fle->length = sym_op->cipher.data.length + sess->iv.length;
1125 DPAA2_SET_FLE_SG_EXT(fle);
1127 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1128 sge->length = sess->iv.length;
1131 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1132 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
1133 sym_op->m_src->data_off);
1135 sge->length = sym_op->cipher.data.length;
1136 DPAA2_SET_FLE_FIN(sge);
1137 DPAA2_SET_FLE_FIN(fle);
1140 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1141 " off =%d, len =%d\n",
1142 DPAA2_GET_FD_ADDR(fd),
1143 DPAA2_GET_FD_BPID(fd),
1144 rte_dpaa2_bpid_info[bpid].meta_data_size,
1145 DPAA2_GET_FD_OFFSET(fd),
1146 DPAA2_GET_FD_LEN(fd));
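/* Resolve the session from the crypto op and dispatch to the matching
 * FD builder, using the scatter-gather variants when the source mbuf is
 * segmented.
 */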
1152 build_sec_fd(struct rte_crypto_op *op,
1153 struct qbman_fd *fd, uint16_t bpid)
1156 dpaa2_sec_session *sess;
1158 PMD_INIT_FUNC_TRACE();
1160 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1161 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1162 op->sym->session, cryptodev_driver_id);
1163 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1164 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1165 op->sym->sec_session);
1169 /* Segmented buffer */
1170 if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1171 switch (sess->ctxt_type) {
1172 case DPAA2_SEC_CIPHER:
1173 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1175 case DPAA2_SEC_AUTH:
1176 ret = build_auth_sg_fd(sess, op, fd, bpid);
1178 case DPAA2_SEC_AEAD:
1179 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1181 case DPAA2_SEC_CIPHER_HASH:
1182 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1184 case DPAA2_SEC_HASH_CIPHER:
1186 DPAA2_SEC_ERR("error: Unsupported session");
1189 switch (sess->ctxt_type) {
1190 case DPAA2_SEC_CIPHER:
1191 ret = build_cipher_fd(sess, op, fd, bpid);
1193 case DPAA2_SEC_AUTH:
1194 ret = build_auth_fd(sess, op, fd, bpid);
1196 case DPAA2_SEC_AEAD:
1197 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1199 case DPAA2_SEC_CIPHER_HASH:
1200 ret = build_authenc_fd(sess, op, fd, bpid);
1202 case DPAA2_SEC_IPSEC:
1203 ret = build_proto_fd(sess, op, fd, bpid);
1205 case DPAA2_SEC_PDCP:
1206 ret = build_proto_compound_fd(sess, op, fd, bpid);
1208 case DPAA2_SEC_HASH_CIPHER:
1210 DPAA2_SEC_ERR("error: Unsupported session");
1217 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1220 /* Function to transmit the frames to a given device and VQ */
1223 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1224 uint32_t frames_to_send;
1225 struct qbman_eq_desc eqdesc;
1226 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1227 struct qbman_swp *swp;
1228 uint16_t num_tx = 0;
1229 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1230 /* TODO: need to support multiple buffer pools */
1232 struct rte_mempool *mb_pool;
1234 if (unlikely(nb_ops == 0))
1237 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1238 DPAA2_SEC_ERR("sessionless crypto op not supported");
1241 /* Prepare the enqueue descriptor */
1242 qbman_eq_desc_clear(&eqdesc);
1243 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1244 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1245 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1247 if (!DPAA2_PER_LCORE_DPIO) {
1248 ret = dpaa2_affine_qbman_swp();
1250 DPAA2_SEC_ERR("Failure in affining portal");
1254 swp = DPAA2_PER_LCORE_PORTAL;
1257 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1258 dpaa2_eqcr_size : nb_ops;
1260 for (loop = 0; loop < frames_to_send; loop++) {
1261 if ((*ops)->sym->m_src->seqn) {
1262 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1264 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1265 DPAA2_PER_LCORE_DQRR_SIZE--;
1266 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1267 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1270 /* Clear the unused FD fields before sending */
1271 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1272 mb_pool = (*ops)->sym->m_src->pool;
1273 bpid = mempool_to_bpid(mb_pool);
1274 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1276 DPAA2_SEC_ERR("error: Improper packet contents"
1277 " for crypto operation");
1283 while (loop < frames_to_send) {
1284 loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1287 frames_to_send - loop);
1290 num_tx += frames_to_send;
1291 nb_ops -= frames_to_send;
1294 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1295 dpaa2_qp->tx_vq.err_pkts += nb_ops;
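/* Convert a single-format FD (protocol offload) back into the
 * originating crypto op: restore the mbuf buf_iova and adjust the data
 * offset by the SEC data head-room.
 */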
1299 static inline struct rte_crypto_op *
1300 sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
1302 struct rte_crypto_op *op;
1303 uint16_t len = DPAA2_GET_FD_LEN(fd);
1305 dpaa2_sec_session *sess_priv;
1307 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1308 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1309 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1311 diff = len - mbuf->pkt_len;
1312 mbuf->pkt_len += diff;
1313 mbuf->data_len += diff;
1314 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1315 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1316 op->sym->aead.digest.phys_addr = 0L;
1318 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1319 op->sym->sec_session);
1320 if (sess_priv->dir == DIR_ENC)
1321 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1323 mbuf->data_off += SEC_FLC_DHR_INBOUND;
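/* Convert an FD returned by SEC back into the originating crypto op.
 * Compound FDs use the extra FLE stored just before the FD address, and
 * the per-op FLE memory is released here.
 */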
1328 static inline struct rte_crypto_op *
1329 sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
1331 struct qbman_fle *fle;
1332 struct rte_crypto_op *op;
1333 struct ctxt_priv *priv;
1334 struct rte_mbuf *dst, *src;
1336 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1337 return sec_simple_fd_to_mbuf(fd, driver_id);
1339 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1341 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1342 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1344 /* We are using the first FLE entry to store the mbuf.
1345 * Currently we do not know which FLE has the mbuf stored.
1346 * So while retrieving we can go back one FLE from the FD ADDR
1347 * to get the mbuf address from the previous FLE.
1348 * A better approach would be to use the inline mbuf.
1351 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1352 /* TODO complete it. */
1353 DPAA2_SEC_ERR("error: non inline buffer");
1356 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1359 src = op->sym->m_src;
1362 if (op->sym->m_dst) {
1363 dst = op->sym->m_dst;
1368 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1369 dpaa2_sec_session *sess = (dpaa2_sec_session *)
1370 get_sec_session_private_data(op->sym->sec_session);
1371 if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
1372 uint16_t len = DPAA2_GET_FD_LEN(fd);
1374 dst->data_len = len;
1378 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1379 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1382 DPAA2_GET_FD_ADDR(fd),
1383 DPAA2_GET_FD_BPID(fd),
1384 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1385 DPAA2_GET_FD_OFFSET(fd),
1386 DPAA2_GET_FD_LEN(fd));
1388 /* free the fle memory */
1389 if (likely(rte_pktmbuf_is_contiguous(src))) {
1390 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1391 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1393 rte_free((void *)(fle-1));
1399 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1402 /* Function responsible for receiving frames for a given device and VQ */
1403 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1404 struct rte_cryptodev *dev =
1405 (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
1406 struct qbman_result *dq_storage;
1407 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1408 int ret, num_rx = 0;
1409 uint8_t is_last = 0, status;
1410 struct qbman_swp *swp;
1411 const struct qbman_fd *fd;
1412 struct qbman_pull_desc pulldesc;
1414 if (!DPAA2_PER_LCORE_DPIO) {
1415 ret = dpaa2_affine_qbman_swp();
1417 DPAA2_SEC_ERR("Failure in affining portal");
1421 swp = DPAA2_PER_LCORE_PORTAL;
1422 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1424 qbman_pull_desc_clear(&pulldesc);
1425 qbman_pull_desc_set_numframes(&pulldesc,
1426 (nb_ops > dpaa2_dqrr_size) ?
1427 dpaa2_dqrr_size : nb_ops);
1428 qbman_pull_desc_set_fq(&pulldesc, fqid);
1429 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1430 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1433 /* Issue a volatile dequeue command. */
1435 if (qbman_swp_pull(swp, &pulldesc)) {
1437 "SEC VDQ command is not issued : QBMAN busy");
1438 /* Portal was busy, try again */
1444 /* Receive the packets till the Last Dequeue entry is found with
1445 * respect to the above issued PULL command.
1448 /* Check if the previously issued command is completed.
1449 * Note that the SWP is shared between the Ethernet driver
1450 * and the SEC driver.
1452 while (!qbman_check_command_complete(dq_storage))
1455 /* Loop until the dq_storage is updated with
1456 * new token by QBMAN
1458 while (!qbman_check_new_result(dq_storage))
1460 /* Check whether the last pull command has expired and
1461 * set the condition for loop termination
1463 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1465 /* Check for valid frame. */
1466 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1468 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1469 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1474 fd = qbman_result_DQ_fd(dq_storage);
1475 ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);
1477 if (unlikely(fd->simple.frc)) {
1478 /* TODO Parse SEC errors */
1479 DPAA2_SEC_ERR("SEC returned Error - %x",
1481 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1483 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1488 } /* End of Packet Rx loop */
1490 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1492 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1493 /* Return the total number of packets received to the DPAA2 app */
1497 /** Release queue pair */
1499 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1501 struct dpaa2_sec_qp *qp =
1502 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1504 PMD_INIT_FUNC_TRACE();
1506 if (qp->rx_vq.q_storage) {
1507 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1508 rte_free(qp->rx_vq.q_storage);
1512 dev->data->queue_pairs[queue_pair_id] = NULL;
1517 /** Setup a queue pair */
1519 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1520 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1521 __rte_unused int socket_id,
1522 __rte_unused struct rte_mempool *session_pool)
1524 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1525 struct dpaa2_sec_qp *qp;
1526 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1527 struct dpseci_rx_queue_cfg cfg;
1530 PMD_INIT_FUNC_TRACE();
1532 /* If qp is already in use, free ring memory and qp metadata. */
1533 if (dev->data->queue_pairs[qp_id] != NULL) {
1534 DPAA2_SEC_INFO("QP already setup");
1538 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1539 dev, qp_id, qp_conf);
1541 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1543 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1544 RTE_CACHE_LINE_SIZE);
1546 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1550 qp->rx_vq.dev = dev;
1551 qp->tx_vq.dev = dev;
1552 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1553 sizeof(struct queue_storage_info_t),
1554 RTE_CACHE_LINE_SIZE);
1555 if (!qp->rx_vq.q_storage) {
1556 DPAA2_SEC_ERR("malloc failed for q_storage");
1559 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1561 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1562 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1566 dev->data->queue_pairs[qp_id] = qp;
1568 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1569 cfg.user_ctx = (size_t)(&qp->rx_vq);
1570 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1575 /** Return the number of allocated queue pairs */
1577 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1579 PMD_INIT_FUNC_TRACE();
1581 return dev->data->nb_queue_pairs;
1584 /** Returns the size of the dpaa2_sec session structure */
1586 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1588 PMD_INIT_FUNC_TRACE();
1590 return sizeof(dpaa2_sec_session);
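/* Prepare a cipher-only session: copy the key, select the CAAM
 * algorithm and build the shared descriptor.
 */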
1594 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1595 struct rte_crypto_sym_xform *xform,
1596 dpaa2_sec_session *session)
1598 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1599 struct alginfo cipherdata;
1601 struct ctxt_priv *priv;
1602 struct sec_flow_context *flc;
1604 PMD_INIT_FUNC_TRACE();
1606 /* For SEC CIPHER only one descriptor is required. */
1607 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1608 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1609 RTE_CACHE_LINE_SIZE);
1611 DPAA2_SEC_ERR("No Memory for priv CTXT");
1615 priv->fle_pool = dev_priv->fle_pool;
1617 flc = &priv->flc_desc[0].flc;
1619 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1620 RTE_CACHE_LINE_SIZE);
1621 if (session->cipher_key.data == NULL) {
1622 DPAA2_SEC_ERR("No Memory for cipher key");
1626 session->cipher_key.length = xform->cipher.key.length;
1628 memcpy(session->cipher_key.data, xform->cipher.key.data,
1629 xform->cipher.key.length);
1630 cipherdata.key = (size_t)session->cipher_key.data;
1631 cipherdata.keylen = session->cipher_key.length;
1632 cipherdata.key_enc_flags = 0;
1633 cipherdata.key_type = RTA_DATA_IMM;
1635 /* Set IV parameters */
1636 session->iv.offset = xform->cipher.iv.offset;
1637 session->iv.length = xform->cipher.iv.length;
1639 switch (xform->cipher.algo) {
1640 case RTE_CRYPTO_CIPHER_AES_CBC:
1641 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1642 cipherdata.algmode = OP_ALG_AAI_CBC;
1643 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1645 case RTE_CRYPTO_CIPHER_3DES_CBC:
1646 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1647 cipherdata.algmode = OP_ALG_AAI_CBC;
1648 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1650 case RTE_CRYPTO_CIPHER_AES_CTR:
1651 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1652 cipherdata.algmode = OP_ALG_AAI_CTR;
1653 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1655 case RTE_CRYPTO_CIPHER_3DES_CTR:
1656 case RTE_CRYPTO_CIPHER_AES_ECB:
1657 case RTE_CRYPTO_CIPHER_3DES_ECB:
1658 case RTE_CRYPTO_CIPHER_AES_XTS:
1659 case RTE_CRYPTO_CIPHER_AES_F8:
1660 case RTE_CRYPTO_CIPHER_ARC4:
1661 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1662 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1663 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1664 case RTE_CRYPTO_CIPHER_NULL:
1665 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1666 xform->cipher.algo);
1669 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1670 xform->cipher.algo);
1673 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1676 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1677 &cipherdata, NULL, session->iv.length,
1680 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1685 flc->mode_bits = 0x8000;
1687 flc->word1_sdl = (uint8_t)bufsize;
1688 flc->word2_rflc_31_0 = lower_32_bits(
1689 (size_t)&(((struct dpaa2_sec_qp *)
1690 dev->data->queue_pairs[0])->rx_vq));
1691 flc->word3_rflc_63_32 = upper_32_bits(
1692 (size_t)&(((struct dpaa2_sec_qp *)
1693 dev->data->queue_pairs[0])->rx_vq));
1694 session->ctxt = priv;
1696 for (i = 0; i < bufsize; i++)
1697 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1702 rte_free(session->cipher_key.data);
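/* Prepare an auth-only (HMAC) session and build its shared descriptor */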
1708 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1709 struct rte_crypto_sym_xform *xform,
1710 dpaa2_sec_session *session)
1712 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1713 struct alginfo authdata;
1715 struct ctxt_priv *priv;
1716 struct sec_flow_context *flc;
1718 PMD_INIT_FUNC_TRACE();
1720 /* For SEC AUTH three descriptors are required for various stages */
1721 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1722 sizeof(struct ctxt_priv) + 3 *
1723 sizeof(struct sec_flc_desc),
1724 RTE_CACHE_LINE_SIZE);
1726 DPAA2_SEC_ERR("No Memory for priv CTXT");
1730 priv->fle_pool = dev_priv->fle_pool;
1731 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1733 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1734 RTE_CACHE_LINE_SIZE);
1735 if (session->auth_key.data == NULL) {
1736 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1740 session->auth_key.length = xform->auth.key.length;
1742 memcpy(session->auth_key.data, xform->auth.key.data,
1743 xform->auth.key.length);
1744 authdata.key = (size_t)session->auth_key.data;
1745 authdata.keylen = session->auth_key.length;
1746 authdata.key_enc_flags = 0;
1747 authdata.key_type = RTA_DATA_IMM;
1749 session->digest_length = xform->auth.digest_length;
1751 switch (xform->auth.algo) {
1752 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1753 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1754 authdata.algmode = OP_ALG_AAI_HMAC;
1755 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1757 case RTE_CRYPTO_AUTH_MD5_HMAC:
1758 authdata.algtype = OP_ALG_ALGSEL_MD5;
1759 authdata.algmode = OP_ALG_AAI_HMAC;
1760 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1762 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1763 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1764 authdata.algmode = OP_ALG_AAI_HMAC;
1765 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1767 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1768 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1769 authdata.algmode = OP_ALG_AAI_HMAC;
1770 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1772 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1773 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1774 authdata.algmode = OP_ALG_AAI_HMAC;
1775 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1777 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1778 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1779 authdata.algmode = OP_ALG_AAI_HMAC;
1780 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1782 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1783 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1784 case RTE_CRYPTO_AUTH_NULL:
1785 case RTE_CRYPTO_AUTH_SHA1:
1786 case RTE_CRYPTO_AUTH_SHA256:
1787 case RTE_CRYPTO_AUTH_SHA512:
1788 case RTE_CRYPTO_AUTH_SHA224:
1789 case RTE_CRYPTO_AUTH_SHA384:
1790 case RTE_CRYPTO_AUTH_MD5:
1791 case RTE_CRYPTO_AUTH_AES_GMAC:
1792 case RTE_CRYPTO_AUTH_KASUMI_F9:
1793 case RTE_CRYPTO_AUTH_AES_CMAC:
1794 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1795 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1796 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1800 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1804 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1807 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1808 1, 0, &authdata, !session->dir,
1809 session->digest_length);
1811 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1815 flc->word1_sdl = (uint8_t)bufsize;
1816 flc->word2_rflc_31_0 = lower_32_bits(
1817 (size_t)&(((struct dpaa2_sec_qp *)
1818 dev->data->queue_pairs[0])->rx_vq));
1819 flc->word3_rflc_63_32 = upper_32_bits(
1820 (size_t)&(((struct dpaa2_sec_qp *)
1821 dev->data->queue_pairs[0])->rx_vq));
1822 session->ctxt = priv;
1823 for (i = 0; i < bufsize; i++)
1824 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1825 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1831 rte_free(session->auth_key.data);
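/* Prepare an AEAD (AES-GCM) session and build its shared descriptor */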
1837 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1838 struct rte_crypto_sym_xform *xform,
1839 dpaa2_sec_session *session)
1841 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1842 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1843 struct alginfo aeaddata;
1845 struct ctxt_priv *priv;
1846 struct sec_flow_context *flc;
1847 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1850 PMD_INIT_FUNC_TRACE();
1852 /* Set IV parameters */
1853 session->iv.offset = aead_xform->iv.offset;
1854 session->iv.length = aead_xform->iv.length;
1855 session->ctxt_type = DPAA2_SEC_AEAD;
1857 /* For SEC AEAD only one descriptor is required */
1858 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1859 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1860 RTE_CACHE_LINE_SIZE);
1862 DPAA2_SEC_ERR("No Memory for priv CTXT");
1866 priv->fle_pool = dev_priv->fle_pool;
1867 flc = &priv->flc_desc[0].flc;
1869 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1870 RTE_CACHE_LINE_SIZE);
1871 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1872 DPAA2_SEC_ERR("No Memory for aead key");
1876 memcpy(session->aead_key.data, aead_xform->key.data,
1877 aead_xform->key.length);
1879 session->digest_length = aead_xform->digest_length;
1880 session->aead_key.length = aead_xform->key.length;
1881 ctxt->auth_only_len = aead_xform->aad_length;
1883 aeaddata.key = (size_t)session->aead_key.data;
1884 aeaddata.keylen = session->aead_key.length;
1885 aeaddata.key_enc_flags = 0;
1886 aeaddata.key_type = RTA_DATA_IMM;
1888 switch (aead_xform->algo) {
1889 case RTE_CRYPTO_AEAD_AES_GCM:
1890 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1891 aeaddata.algmode = OP_ALG_AAI_GCM;
1892 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1894 case RTE_CRYPTO_AEAD_AES_CCM:
1895 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1899 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1903 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1906 priv->flc_desc[0].desc[0] = aeaddata.keylen;
1907 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1909 (unsigned int *)priv->flc_desc[0].desc,
1910 &priv->flc_desc[0].desc[1], 1);
1913 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1916 if (priv->flc_desc[0].desc[1] & 1) {
1917 aeaddata.key_type = RTA_DATA_IMM;
1919 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1920 aeaddata.key_type = RTA_DATA_PTR;
1922 priv->flc_desc[0].desc[0] = 0;
1923 priv->flc_desc[0].desc[1] = 0;
1925 if (session->dir == DIR_ENC)
1926 bufsize = cnstr_shdsc_gcm_encap(
1927 priv->flc_desc[0].desc, 1, 0,
1928 &aeaddata, session->iv.length,
1929 session->digest_length);
1931 bufsize = cnstr_shdsc_gcm_decap(
1932 priv->flc_desc[0].desc, 1, 0,
1933 &aeaddata, session->iv.length,
1934 session->digest_length);
1936 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1940 flc->word1_sdl = (uint8_t)bufsize;
1941 flc->word2_rflc_31_0 = lower_32_bits(
1942 (size_t)&(((struct dpaa2_sec_qp *)
1943 dev->data->queue_pairs[0])->rx_vq));
1944 flc->word3_rflc_63_32 = upper_32_bits(
1945 (size_t)&(((struct dpaa2_sec_qp *)
1946 dev->data->queue_pairs[0])->rx_vq));
1947 session->ctxt = priv;
1948 for (i = 0; i < bufsize; i++)
1949 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1950 i, priv->flc_desc[0].desc[i]);
1955 rte_free(session->aead_key.data);
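/* Prepare a chained cipher+auth session and build the authenc shared
 * descriptor.
 */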
1962 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1963 struct rte_crypto_sym_xform *xform,
1964 dpaa2_sec_session *session)
1966 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1967 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1968 struct alginfo authdata, cipherdata;
1970 struct ctxt_priv *priv;
1971 struct sec_flow_context *flc;
1972 struct rte_crypto_cipher_xform *cipher_xform;
1973 struct rte_crypto_auth_xform *auth_xform;
1976 PMD_INIT_FUNC_TRACE();
1978 if (session->ext_params.aead_ctxt.auth_cipher_text) {
1979 cipher_xform = &xform->cipher;
1980 auth_xform = &xform->next->auth;
1981 session->ctxt_type =
1982 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1983 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1985 cipher_xform = &xform->next->cipher;
1986 auth_xform = &xform->auth;
1987 session->ctxt_type =
1988 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1989 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1992 /* Set IV parameters */
1993 session->iv.offset = cipher_xform->iv.offset;
1994 session->iv.length = cipher_xform->iv.length;
1996 /* For SEC AEAD only one descriptor is required */
1997 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1998 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1999 RTE_CACHE_LINE_SIZE);
2001 DPAA2_SEC_ERR("No Memory for priv CTXT");
2005 priv->fle_pool = dev_priv->fle_pool;
2006 flc = &priv->flc_desc[0].flc;
2008 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2009 RTE_CACHE_LINE_SIZE);
2010 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2011 DPAA2_SEC_ERR("No Memory for cipher key");
2015 session->cipher_key.length = cipher_xform->key.length;
2016 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2017 RTE_CACHE_LINE_SIZE);
2018 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2019 DPAA2_SEC_ERR("No Memory for auth key");
2020 rte_free(session->cipher_key.data);
2024 session->auth_key.length = auth_xform->key.length;
2025 memcpy(session->cipher_key.data, cipher_xform->key.data,
2026 cipher_xform->key.length);
2027 memcpy(session->auth_key.data, auth_xform->key.data,
2028 auth_xform->key.length);
2030 authdata.key = (size_t)session->auth_key.data;
2031 authdata.keylen = session->auth_key.length;
2032 authdata.key_enc_flags = 0;
2033 authdata.key_type = RTA_DATA_IMM;
2035 session->digest_length = auth_xform->digest_length;
2037 switch (auth_xform->algo) {
2038 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2039 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2040 authdata.algmode = OP_ALG_AAI_HMAC;
2041 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2043 case RTE_CRYPTO_AUTH_MD5_HMAC:
2044 authdata.algtype = OP_ALG_ALGSEL_MD5;
2045 authdata.algmode = OP_ALG_AAI_HMAC;
2046 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2048 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2049 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2050 authdata.algmode = OP_ALG_AAI_HMAC;
2051 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2053 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2054 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2055 authdata.algmode = OP_ALG_AAI_HMAC;
2056 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2058 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2059 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2060 authdata.algmode = OP_ALG_AAI_HMAC;
2061 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2063 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2064 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2065 authdata.algmode = OP_ALG_AAI_HMAC;
2066 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2068 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2069 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2070 case RTE_CRYPTO_AUTH_NULL:
2071 case RTE_CRYPTO_AUTH_SHA1:
2072 case RTE_CRYPTO_AUTH_SHA256:
2073 case RTE_CRYPTO_AUTH_SHA512:
2074 case RTE_CRYPTO_AUTH_SHA224:
2075 case RTE_CRYPTO_AUTH_SHA384:
2076 case RTE_CRYPTO_AUTH_MD5:
2077 case RTE_CRYPTO_AUTH_AES_GMAC:
2078 case RTE_CRYPTO_AUTH_KASUMI_F9:
2079 case RTE_CRYPTO_AUTH_AES_CMAC:
2080 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2081 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2082 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2086 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2090 cipherdata.key = (size_t)session->cipher_key.data;
2091 cipherdata.keylen = session->cipher_key.length;
2092 cipherdata.key_enc_flags = 0;
2093 cipherdata.key_type = RTA_DATA_IMM;
2095 switch (cipher_xform->algo) {
2096 case RTE_CRYPTO_CIPHER_AES_CBC:
2097 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2098 cipherdata.algmode = OP_ALG_AAI_CBC;
2099 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2101 case RTE_CRYPTO_CIPHER_3DES_CBC:
2102 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2103 cipherdata.algmode = OP_ALG_AAI_CBC;
2104 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2106 case RTE_CRYPTO_CIPHER_AES_CTR:
2107 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2108 cipherdata.algmode = OP_ALG_AAI_CTR;
2109 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2111 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2112 case RTE_CRYPTO_CIPHER_NULL:
2113 case RTE_CRYPTO_CIPHER_3DES_ECB:
2114 case RTE_CRYPTO_CIPHER_AES_ECB:
2115 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2116 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2117 cipher_xform->algo);
2120 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2121 cipher_xform->algo);
2124 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}

	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;
	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
		if (bufsize < 0) {
			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
			goto error_out;
		}
	} else {
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
2171 flc->word2_rflc_31_0 = lower_32_bits(
2172 (size_t)&(((struct dpaa2_sec_qp *)
2173 dev->data->queue_pairs[0])->rx_vq));
2174 flc->word3_rflc_63_32 = upper_32_bits(
2175 (size_t)&(((struct dpaa2_sec_qp *)
2176 dev->data->queue_pairs[0])->rx_vq));
2177 session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -1;
}

static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,	void *sess)
{
	dpaa2_sec_session *session = sess;
2197 PMD_INIT_FUNC_TRACE();
2199 if (unlikely(sess == NULL)) {
		DPAA2_SEC_ERR("Invalid session struct");
		return -EINVAL;
	}

	memset(session, 0, sizeof(dpaa2_sec_session));
2205 /* Default IV length = 0 */
2206 session->iv.length = 0;
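	/* Dispatch on the xform chain: a single cipher, auth or AEAD xform
	 * maps to a simple context, while cipher+auth chains select the
	 * combined cipher-hash context.
	 */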
2209 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2210 session->ctxt_type = DPAA2_SEC_CIPHER;
2211 dpaa2_sec_cipher_init(dev, xform, session);
2213 /* Authentication Only */
2214 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2215 xform->next == NULL) {
2216 session->ctxt_type = DPAA2_SEC_AUTH;
2217 dpaa2_sec_auth_init(dev, xform, session);
2219 /* Cipher then Authenticate */
2220 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2221 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2222 session->ext_params.aead_ctxt.auth_cipher_text = true;
2223 dpaa2_sec_aead_chain_init(dev, xform, session);
2225 /* Authenticate then Cipher */
2226 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2227 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2228 session->ext_params.aead_ctxt.auth_cipher_text = false;
2229 dpaa2_sec_aead_chain_init(dev, xform, session);
2231 /* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	return 0;
}

static int
dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2246 dpaa2_sec_session *session,
			struct alginfo *aeaddata)
{
	PMD_INIT_FUNC_TRACE();
2251 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2252 RTE_CACHE_LINE_SIZE);
2253 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
2258 aead_xform->key.length);
2260 session->digest_length = aead_xform->digest_length;
2261 session->aead_key.length = aead_xform->key.length;
2263 aeaddata->key = (size_t)session->aead_key.data;
2264 aeaddata->keylen = session->aead_key.length;
2265 aeaddata->key_enc_flags = 0;
2266 aeaddata->key_type = RTA_DATA_IMM;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata->algtype = OP_ALG_ALGSEL_AES;
		aeaddata->algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		aeaddata->algtype = OP_ALG_ALGSEL_AES;
		aeaddata->algmode = OP_ALG_AAI_CCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		return -1;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2292 struct rte_crypto_auth_xform *auth_xform,
2293 dpaa2_sec_session *session,
2294 struct alginfo *cipherdata,
			struct alginfo *authdata)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
2299 cipher_xform->key.length,
2300 RTE_CACHE_LINE_SIZE);
2301 if (session->cipher_key.data == NULL &&
2302 cipher_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
2308 memcpy(session->cipher_key.data, cipher_xform->key.data,
2309 cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
2319 auth_xform->key.length,
2320 RTE_CACHE_LINE_SIZE);
2321 if (session->auth_key.data == NULL &&
2322 auth_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
2327 memcpy(session->auth_key.data, auth_xform->key.data,
2328 auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	authdata->key = (size_t)session->auth_key.data;
2337 authdata->keylen = session->auth_key.length;
2338 authdata->key_enc_flags = 0;
2339 authdata->key_type = RTA_DATA_IMM;
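	/* For protocol offload the algorithm constants come from the IPsec
	 * protocol command set (OP_PCL_IPSEC_*) rather than the plain
	 * OP_ALG_ALGSEL_* selectors used for raw crypto sessions.
	 */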
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      session->auth_alg);
		return -ENOTSUP;
	}
2388 cipherdata->key = (size_t)session->cipher_key.data;
2389 cipherdata->keylen = session->cipher_key.length;
2390 cipherdata->key_enc_flags = 0;
2391 cipherdata->key_type = RTA_DATA_IMM;
	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_3DES;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
		cipherdata->algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata->algtype = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}

#ifdef RTE_LIBRTE_SECURITY_TEST
2426 static uint8_t aes_cbc_iv[] = {
2427 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif

static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
			    struct rte_security_session_conf *conf,
			    void *sess)
{
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2437 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2438 struct rte_crypto_auth_xform *auth_xform = NULL;
2439 struct rte_crypto_aead_xform *aead_xform = NULL;
2440 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2441 struct ctxt_priv *priv;
2442 struct ipsec_encap_pdb encap_pdb;
2443 struct ipsec_decap_pdb decap_pdb;
2444 struct alginfo authdata, cipherdata;
2446 struct sec_flow_context *flc;
2447 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2450 PMD_INIT_FUNC_TRACE();
2452 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2453 sizeof(struct ctxt_priv) +
2454 sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;
2463 flc = &priv->flc_desc[0].flc;
2465 memset(session, 0, sizeof(dpaa2_sec_session));
2467 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2468 cipher_xform = &conf->crypto_xform->cipher;
2469 if (conf->crypto_xform->next)
2470 auth_xform = &conf->crypto_xform->next->auth;
2471 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2472 session, &cipherdata, &authdata);
2473 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2474 auth_xform = &conf->crypto_xform->auth;
2475 if (conf->crypto_xform->next)
2476 cipher_xform = &conf->crypto_xform->next->cipher;
2477 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2478 session, &cipherdata, &authdata);
2479 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2480 aead_xform = &conf->crypto_xform->aead;
2481 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2482 session, &cipherdata);
	} else {
		DPAA2_SEC_ERR("XFORM not specified");
		goto out;
	}

	if (ret) {
		DPAA2_SEC_ERR("Failed to process xform");
		goto out;
	}

	session->ctxt_type = DPAA2_SEC_IPSEC;
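	/* Egress SAs get a prebuilt outer IPv4 header and an encap PDB baked
	 * into the shared descriptor; ingress SAs only need a decap PDB.
	 */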
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		struct ip ip4_hdr;

		flc->dhr = SEC_FLC_DHR_OUTBOUND;
		ip4_hdr.ip_v = IPVERSION;
		ip4_hdr.ip_hl = 5;
		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		ip4_hdr.ip_id = 0;
		ip4_hdr.ip_off = 0;
		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		ip4_hdr.ip_p = IPPROTO_ESP;
		ip4_hdr.ip_sum = 0;
		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
			sizeof(struct ip));
2512 /* For Sec Proto only one descriptor is required. */
2513 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		encap_pdb.spi = ipsec_xform->spi;
		encap_pdb.ip_hdr_len = sizeof(struct ip);
2522 session->dir = DIR_ENC;
2523 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2524 1, 0, SHR_SERIAL, &encap_pdb,
2525 (uint8_t *)&ip4_hdr,
2526 &cipherdata, &authdata);
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		flc->dhr = SEC_FLC_DHR_INBOUND;
		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
				1, 0, SHR_SERIAL,
				&decap_pdb, &cipherdata, &authdata);
	} else
		goto out;

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
2546 /* Enable the stashing control bit */
2547 DPAA2_SET_FLC_RSC(flc);
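	/* The FLC also carries the pointer to this queue pair's rx_vq, split
	 * across word2/word3; the low-order bits OR'd in select the stashing
	 * configuration used when the response is delivered.
	 */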
2548 flc->word2_rflc_31_0 = lower_32_bits(
2549 (size_t)&(((struct dpaa2_sec_qp *)
2550 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2551 flc->word3_rflc_63_32 = upper_32_bits(
2552 (size_t)&(((struct dpaa2_sec_qp *)
2553 dev->data->queue_pairs[0])->rx_vq));
2555 /* Set EWS bit i.e. enable write-safe */
2556 DPAA2_SET_FLC_EWS(flc);
2557 /* Set BS = 1 i.e reuse input buffers as output buffers */
2558 DPAA2_SET_FLC_REUSE_BS(flc);
2559 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2560 DPAA2_SET_FLC_REUSE_FF(flc);
	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2578 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2579 struct rte_crypto_auth_xform *auth_xform = NULL;
2580 struct rte_crypto_cipher_xform *cipher_xform;
2581 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2582 struct ctxt_priv *priv;
2583 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2584 struct alginfo authdata, cipherdata;
2586 struct sec_flow_context *flc;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = true;
#else
	int swap = false;
#endif

	PMD_INIT_FUNC_TRACE();
2595 memset(session, 0, sizeof(dpaa2_sec_session));
2597 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2598 sizeof(struct ctxt_priv) +
2599 sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;
2608 flc = &priv->flc_desc[0].flc;
2610 /* find xfrm types */
2611 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2612 cipher_xform = &xform->cipher;
2613 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2614 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2615 session->ext_params.aead_ctxt.auth_cipher_text = true;
2616 cipher_xform = &xform->cipher;
2617 auth_xform = &xform->next->auth;
2618 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2619 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2620 session->ext_params.aead_ctxt.auth_cipher_text = false;
2621 cipher_xform = &xform->next->cipher;
2622 auth_xform = &xform->auth;
	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->ctxt_type = DPAA2_SEC_PDCP;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for cipher key");
			rte_free(priv);
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	session->pdcp.domain = pdcp_xform->domain;
2654 session->pdcp.bearer = pdcp_xform->bearer;
2655 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2656 session->pdcp.sn_size = pdcp_xform->sn_size;
2657 #ifdef ENABLE_HFN_OVERRIDE
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
#endif
	session->pdcp.hfn = pdcp_xform->hfn;
2661 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2663 cipherdata.key = (size_t)session->cipher_key.data;
2664 cipherdata.keylen = session->cipher_key.length;
2665 cipherdata.key_enc_flags = 0;
2666 cipherdata.key_type = RTA_DATA_IMM;
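	/* PDCP shared descriptors use their own cipher/auth type encodings
	 * (PDCP_CIPHER_TYPE_* / PDCP_AUTH_TYPE_*).
	 */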
	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		goto out;
	}
2687 /* Auth is only applicable for control mode operation. */
2688 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
			DPAA2_SEC_ERR(
				"PDCP Seq Num size should be 5 bits for cmode");
			goto out;
		}
		if (auth_xform) {
			session->auth_key.data = rte_zmalloc(NULL,
2696 auth_xform->key.length,
2697 RTE_CACHE_LINE_SIZE);
2698 if (session->auth_key.data == NULL &&
2699 auth_xform->key.length > 0) {
2700 DPAA2_SEC_ERR("No Memory for auth key");
				rte_free(session->cipher_key.data);
				rte_free(priv);
				return -ENOMEM;
			}
			session->auth_key.length = auth_xform->key.length;
2706 memcpy(session->auth_key.data, auth_xform->key.data,
2707 auth_xform->key.length);
			session->auth_alg = auth_xform->algo;
		} else {
			session->auth_key.data = NULL;
			session->auth_key.length = 0;
			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		}
		authdata.key = (size_t)session->auth_key.data;
2715 authdata.keylen = session->auth_key.length;
2716 authdata.key_enc_flags = 0;
2717 authdata.key_type = RTA_DATA_IMM;
		switch (session->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
				      session->auth_alg);
			goto out;
		}
2738 if (session->dir == DIR_ENC)
2739 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2740 priv->flc_desc[0].desc, 1, swap,
2743 pdcp_xform->pkt_dir,
2744 pdcp_xform->hfn_threshold,
2745 &cipherdata, &authdata,
2747 else if (session->dir == DIR_DEC)
2748 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2749 priv->flc_desc[0].desc, 1, swap,
2752 pdcp_xform->pkt_dir,
2753 pdcp_xform->hfn_threshold,
2754 &cipherdata, &authdata,
2757 if (session->dir == DIR_ENC)
2758 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2759 priv->flc_desc[0].desc, 1, swap,
2760 (enum pdcp_sn_size)pdcp_xform->sn_size,
2763 pdcp_xform->pkt_dir,
2764 pdcp_xform->hfn_threshold,
2766 else if (session->dir == DIR_DEC)
2767 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2768 priv->flc_desc[0].desc, 1, swap,
2769 (enum pdcp_sn_size)pdcp_xform->sn_size,
2772 pdcp_xform->pkt_dir,
2773 pdcp_xform->hfn_threshold,
2778 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2782 /* Enable the stashing control bit */
2783 DPAA2_SET_FLC_RSC(flc);
2784 flc->word2_rflc_31_0 = lower_32_bits(
2785 (size_t)&(((struct dpaa2_sec_qp *)
2786 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2787 flc->word3_rflc_63_32 = upper_32_bits(
2788 (size_t)&(((struct dpaa2_sec_qp *)
2789 dev->data->queue_pairs[0])->rx_vq));
2791 flc->word1_sdl = (uint8_t)bufsize;
2793 /* Set EWS bit i.e. enable write-safe */
2794 DPAA2_SET_FLC_EWS(flc);
2795 /* Set BS = 1 i.e reuse input buffers as output buffers */
2796 DPAA2_SET_FLC_REUSE_BS(flc);
2797 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2798 DPAA2_SET_FLC_REUSE_FF(flc);
	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_security_session_create(void *dev,
2812 struct rte_security_session_conf *conf,
2813 struct rte_security_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
2817 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2820 if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	default:
		return -EINVAL;
	}

	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa2_sec_security_session_destroy(void *dev __rte_unused,
		struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
2857 void *sess_priv = get_sec_session_private_data(sess);
2859 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

2865 rte_free(s->cipher_key.data);
2866 rte_free(s->auth_key.data);
2867 memset(sess, 0, sizeof(dpaa2_sec_session));
2868 set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2876 struct rte_crypto_sym_xform *xform,
2877 struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
2883 if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
2908 uint8_t index = dev->driver_id;
2909 void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->cipher_key.data);
2915 rte_free(s->auth_key.data);
2916 memset(sess, 0, sizeof(dpaa2_sec_session));
2917 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2918 set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2935 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2936 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2937 struct dpseci_attr attr;
2938 struct dpaa2_queue *dpaa2_q;
2939 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2940 dev->data->queue_pairs;
2941 struct dpseci_rx_queue_attr rx_attr;
2942 struct dpseci_tx_queue_attr tx_attr;
2945 PMD_INIT_FUNC_TRACE();
2947 memset(&attr, 0, sizeof(struct dpseci_attr));
2949 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2951 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2953 goto get_attr_failure;
2955 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2957 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2958 goto get_attr_failure;
2960 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2961 dpaa2_q = &qp[i]->rx_vq;
2962 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2964 dpaa2_q->fqid = rx_attr.fqid;
2965 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2967 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2968 dpaa2_q = &qp[i]->tx_vq;
2969 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2971 dpaa2_q->fqid = tx_attr.fqid;
2972 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2977 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2982 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2984 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2985 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2988 PMD_INIT_FUNC_TRACE();
2990 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2992 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
2997 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2999 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3005 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3007 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3008 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3011 PMD_INIT_FUNC_TRACE();
3013 /* Function is reverse of dpaa2_sec_dev_init.
3014 * It does the following:
3015 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3016 * 2. Close the DPSECI device
3017 * 3. Free the allocated resources.
3020 /*Close the device at underlying layer*/
3021 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3023 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3027 /*Free the allocated memory for ethernet private data and dpseci*/
3035 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3036 struct rte_cryptodev_info *info)
3038 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3040 PMD_INIT_FUNC_TRACE();
3042 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3043 info->feature_flags = dev->feature_flags;
3044 info->capabilities = dpaa2_sec_capabilities;
3045 /* No limit of number of sessions */
3046 info->sym.max_nb_sessions = 0;
3047 info->driver_id = cryptodev_driver_id;
3052 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3053 struct rte_cryptodev_stats *stats)
3055 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3056 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3057 struct dpseci_sec_counters counters = {0};
3058 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3059 dev->data->queue_pairs;
3062 PMD_INIT_FUNC_TRACE();
3063 if (stats == NULL) {
3064 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3067 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3068 if (qp[i] == NULL) {
3069 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3073 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3074 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3075 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3076 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3079 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3082 DPAA2_SEC_ERR("SEC counters failed");
3084 DPAA2_SEC_INFO("dpseci hardware stats:"
3085 "\n\tNum of Requests Dequeued = %" PRIu64
3086 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3087 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3088 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3089 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3090 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3091 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3092 counters.dequeued_requests,
3093 counters.ob_enc_requests,
3094 counters.ib_dec_requests,
3095 counters.ob_enc_bytes,
3096 counters.ob_prot_bytes,
3097 counters.ib_dec_bytes,
3098 counters.ib_valid_bytes);
3103 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3106 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3107 (dev->data->queue_pairs);
3109 PMD_INIT_FUNC_TRACE();
3111 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3112 if (qp[i] == NULL) {
3113 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3116 qp[i]->tx_vq.rx_pkts = 0;
3117 qp[i]->tx_vq.tx_pkts = 0;
3118 qp[i]->tx_vq.err_pkts = 0;
3119 qp[i]->rx_vq.rx_pkts = 0;
3120 qp[i]->rx_vq.tx_pkts = 0;
3121 qp[i]->rx_vq.err_pkts = 0;
3125 static void __attribute__((hot))
3126 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3127 const struct qbman_fd *fd,
3128 const struct qbman_result *dq,
3129 struct dpaa2_queue *rxq,
3130 struct rte_event *ev)
3132 /* Prefetching mbuf */
3133 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3134 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3136 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3137 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3139 ev->flow_id = rxq->ev.flow_id;
3140 ev->sub_event_type = rxq->ev.sub_event_type;
3141 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3142 ev->op = RTE_EVENT_OP_NEW;
3143 ev->sched_type = rxq->ev.sched_type;
3144 ev->queue_id = rxq->ev.queue_id;
3145 ev->priority = rxq->ev.priority;
3146 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
3147 (rxq->dev))->driver_id);
3149 qbman_swp_dqrr_consume(swp, dq);
3152 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3153 const struct qbman_fd *fd,
3154 const struct qbman_result *dq,
3155 struct dpaa2_queue *rxq,
3156 struct rte_event *ev)
3159 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3160 /* Prefetching mbuf */
3161 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3162 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3164 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3165 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3167 ev->flow_id = rxq->ev.flow_id;
3168 ev->sub_event_type = rxq->ev.sub_event_type;
3169 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3170 ev->op = RTE_EVENT_OP_NEW;
3171 ev->sched_type = rxq->ev.sched_type;
3172 ev->queue_id = rxq->ev.queue_id;
3173 ev->priority = rxq->ev.priority;
3175 ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
3176 (rxq->dev))->driver_id);
3177 dqrr_index = qbman_get_dqrr_idx(dq);
3178 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3179 DPAA2_PER_LCORE_DQRR_SIZE++;
3180 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3181 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
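/* Route a queue pair's response queue to a DPCON so that crypto completions
 * are delivered through the eventdev instead of being polled via
 * dequeue_burst.
 */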
3185 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3188 const struct rte_event *event)
3190 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3191 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3192 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3193 struct dpseci_rx_queue_cfg cfg;
3196 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3197 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3198 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3199 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3203 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3204 cfg.options = DPSECI_QUEUE_OPT_DEST;
3205 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3206 cfg.dest_cfg.dest_id = dpcon_id;
3207 cfg.dest_cfg.priority = event->priority;
3209 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3210 cfg.user_ctx = (size_t)(qp);
3211 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3212 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3213 cfg.order_preservation_en = 1;
3215 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3218 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3222 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3228 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3231 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3232 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3233 struct dpseci_rx_queue_cfg cfg;
3236 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3237 cfg.options = DPSECI_QUEUE_OPT_DEST;
3238 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3240 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3243 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3248 static struct rte_cryptodev_ops crypto_ops = {
3249 .dev_configure = dpaa2_sec_dev_configure,
3250 .dev_start = dpaa2_sec_dev_start,
3251 .dev_stop = dpaa2_sec_dev_stop,
3252 .dev_close = dpaa2_sec_dev_close,
3253 .dev_infos_get = dpaa2_sec_dev_infos_get,
3254 .stats_get = dpaa2_sec_stats_get,
3255 .stats_reset = dpaa2_sec_stats_reset,
3256 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3257 .queue_pair_release = dpaa2_sec_queue_pair_release,
3258 .queue_pair_count = dpaa2_sec_queue_pair_count,
3259 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3260 .sym_session_configure = dpaa2_sec_sym_session_configure,
3261 .sym_session_clear = dpaa2_sec_sym_session_clear,
3264 static const struct rte_security_capability *
3265 dpaa2_sec_capabilities_get(void *device __rte_unused)
3267 return dpaa2_sec_security_cap;
3270 static const struct rte_security_ops dpaa2_sec_security_ops = {
3271 .session_create = dpaa2_sec_security_session_create,
3272 .session_update = NULL,
3273 .session_stats_get = NULL,
3274 .session_destroy = dpaa2_sec_security_session_destroy,
3275 .set_pkt_metadata = NULL,
3276 .capabilities_get = dpaa2_sec_capabilities_get
3280 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3282 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3284 rte_free(dev->security_ctx);
3286 rte_mempool_free(internals->fle_pool);
3288 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3289 dev->data->name, rte_socket_id());
3295 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3297 struct dpaa2_sec_dev_private *internals;
3298 struct rte_device *dev = cryptodev->device;
3299 struct rte_dpaa2_device *dpaa2_dev;
3300 struct rte_security_ctx *security_instance;
3301 struct fsl_mc_io *dpseci;
3303 struct dpseci_attr attr;
3307 PMD_INIT_FUNC_TRACE();
3308 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3309 if (dpaa2_dev == NULL) {
3310 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3313 hw_id = dpaa2_dev->object_id;
3315 cryptodev->driver_id = cryptodev_driver_id;
3316 cryptodev->dev_ops = &crypto_ops;
3318 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3319 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3320 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3321 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3322 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3323 RTE_CRYPTODEV_FF_SECURITY |
3324 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3325 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3326 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3327 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3328 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3330 internals = cryptodev->data->dev_private;
3333 * For secondary processes, we don't initialise any further as primary
3334 * has already done this work. Only check we don't need a different
3337 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3338 DPAA2_SEC_DEBUG("Device already init by primary process");
3342 /* Initialize security_ctx only for primary process*/
3343 security_instance = rte_malloc("rte_security_instances_ops",
3344 sizeof(struct rte_security_ctx), 0);
3345 if (security_instance == NULL)
3347 security_instance->device = (void *)cryptodev;
3348 security_instance->ops = &dpaa2_sec_security_ops;
3349 security_instance->sess_cnt = 0;
3350 cryptodev->security_ctx = security_instance;
3352 /*Open the rte device via MC and save the handle for further use*/
3353 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3354 sizeof(struct fsl_mc_io), 0);
3357 "Error in allocating the memory for dpsec object");
3360 dpseci->regs = rte_mcp_ptr_list[0];
3362 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3364 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3368 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3371 "Cannot get dpsec device attributed: Error = %x",
3375 sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
3377 internals->max_nb_queue_pairs = attr.num_tx_queues;
3378 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3379 internals->hw = dpseci;
3380 internals->token = token;
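	/* Per-device pool of frame-list-entry (FLE) buffers used to build the
	 * compound frame descriptors submitted for each crypto operation.
	 */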
3382 sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
3383 internals->fle_pool = rte_mempool_create((const char *)str,
3386 FLE_POOL_CACHE_SIZE, 0,
3387 NULL, NULL, NULL, NULL,
3389 if (!internals->fle_pool) {
3390 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3394 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3398 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3400 /* dpaa2_sec_uninit(crypto_dev_name); */
3405 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3406 struct rte_dpaa2_device *dpaa2_dev)
3408 struct rte_cryptodev *cryptodev;
3409 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3413 sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
3415 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3416 if (cryptodev == NULL)
3419 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3420 cryptodev->data->dev_private = rte_zmalloc_socket(
3421 "cryptodev private structure",
3422 sizeof(struct dpaa2_sec_dev_private),
3423 RTE_CACHE_LINE_SIZE,
3426 if (cryptodev->data->dev_private == NULL)
3427 rte_panic("Cannot allocate memzone for private "
3431 dpaa2_dev->cryptodev = cryptodev;
3432 cryptodev->device = &dpaa2_dev->device;
3434 /* init user callbacks */
3435 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3437 /* Invoke PMD device initialization function */
3438 retval = dpaa2_sec_dev_init(cryptodev);
3442 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3443 rte_free(cryptodev->data->dev_private);
3445 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3451 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3453 struct rte_cryptodev *cryptodev;
3456 cryptodev = dpaa2_dev->cryptodev;
3457 if (cryptodev == NULL)
3460 ret = dpaa2_sec_uninit(cryptodev);
3464 return rte_cryptodev_pmd_destroy(cryptodev);
3467 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3468 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3469 .drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};
3477 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3479 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3480 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3481 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3483 RTE_INIT(dpaa2_sec_init_log)
3485 /* Bus level logs */
3486 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3487 if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}