1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2018 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
37 typedef uint64_t dma_addr_t;
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
44 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
45 * a pointer to the shared descriptor
47 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID 0x1957
49 #define FSL_DEVICE_ID 0x410
50 #define FSL_SUBSYSTEM_SEC 1
51 #define FSL_MC_DPSECI_DEVID 3
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS 32000
56 #define FLE_POOL_BUF_SIZE 256
57 #define FLE_POOL_CACHE_SIZE 512
58 #define FLE_SG_MEM_SIZE 2048
59 #define SEC_FLC_DHR_OUTBOUND -114
60 #define SEC_FLC_DHR_INBOUND 0
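/*
 * A minimal sketch of how an FLE pool sized by the defines above could be
 * created (illustrative only; the real allocation lives in the device
 * configure path and the pool name used here is hypothetical):
 *
 *   struct rte_mempool *fle_pool;
 *
 *   fle_pool = rte_mempool_create("dpaa2_sec_fle_pool",
 *                                 FLE_POOL_NUM_BUFS, FLE_POOL_BUF_SIZE,
 *                                 FLE_POOL_CACHE_SIZE, 0,
 *                                 NULL, NULL, NULL, NULL,
 *                                 SOCKET_ID_ANY, 0);
 *   if (fle_pool == NULL)
 *       return -ENOMEM;
 */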
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
64 static uint8_t cryptodev_driver_id;
66 int dpaa2_logtype_sec;
69 build_proto_compound_fd(dpaa2_sec_session *sess,
70 struct rte_crypto_op *op,
71 struct qbman_fd *fd, uint16_t bpid)
73 struct rte_crypto_sym_op *sym_op = op->sym;
74 struct ctxt_priv *priv = sess->ctxt;
75 struct qbman_fle *fle, *ip_fle, *op_fle;
76 struct sec_flow_context *flc;
77 struct rte_mbuf *src_mbuf = sym_op->m_src;
78 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
84 /* Save the shared descriptor */
85 flc = &priv->flc_desc[0].flc;
87 /* We use the first FLE entry to store the mbuf */
88 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
90 DPAA2_SEC_ERR("Memory alloc failed");
93 memset(fle, 0, FLE_POOL_BUF_SIZE);
94 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
100 if (likely(bpid < MAX_BPID)) {
101 DPAA2_SET_FD_BPID(fd, bpid);
102 DPAA2_SET_FLE_BPID(op_fle, bpid);
103 DPAA2_SET_FLE_BPID(ip_fle, bpid);
105 DPAA2_SET_FD_IVP(fd);
106 DPAA2_SET_FLE_IVP(op_fle);
107 DPAA2_SET_FLE_IVP(ip_fle);
110 /* Configure FD as a FRAME LIST */
111 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
112 DPAA2_SET_FD_COMPOUND_FMT(fd);
113 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
115 /* Configure Output FLE with dst mbuf data */
116 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
117 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
118 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
120 /* Configure Input FLE with src mbuf data */
121 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
122 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
123 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
125 DPAA2_SET_FD_LEN(fd, ip_fle->length);
126 DPAA2_SET_FLE_FIN(ip_fle);
128 #ifdef ENABLE_HFN_OVERRIDE
129 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
130 /* enable HFN override */
131 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
132 DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
133 DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
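/*
 * Resulting compound FD layout (a sketch of the frame-list convention used
 * above; "fle - 1" is the bookkeeping entry holding the crypto op pointer
 * and the saved ctxt):
 *
 *   FD.addr --> op_fle : output -> dst mbuf data
 *               ip_fle : input  -> src mbuf data (FIN bit set)
 *
 * The FD length is taken from ip_fle->length, i.e. the input frame size.
 */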
142 build_proto_fd(dpaa2_sec_session *sess,
143 struct rte_crypto_op *op,
144 struct qbman_fd *fd, uint16_t bpid)
146 struct rte_crypto_sym_op *sym_op = op->sym;
148 return build_proto_compound_fd(sess, op, fd, bpid);
150 struct ctxt_priv *priv = sess->ctxt;
151 struct sec_flow_context *flc;
152 struct rte_mbuf *mbuf = sym_op->m_src;
154 if (likely(bpid < MAX_BPID))
155 DPAA2_SET_FD_BPID(fd, bpid);
157 DPAA2_SET_FD_IVP(fd);
159 /* Save the shared descriptor */
160 flc = &priv->flc_desc[0].flc;
162 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
163 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
164 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
165 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
167 /* save physical address of mbuf */
168 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
169 mbuf->buf_iova = (size_t)op;
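/*
 * Note: the crypto op pointer is stashed in mbuf->buf_iova and the original
 * IOVA is parked in aead.digest.phys_addr; both are restored on the dequeue
 * side in sec_simple_fd_to_mbuf().
 */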
175 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
176 struct rte_crypto_op *op,
177 struct qbman_fd *fd, __rte_unused uint16_t bpid)
179 struct rte_crypto_sym_op *sym_op = op->sym;
180 struct ctxt_priv *priv = sess->ctxt;
181 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
182 struct sec_flow_context *flc;
183 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
184 int icv_len = sess->digest_length;
186 struct rte_mbuf *mbuf;
187 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
190 PMD_INIT_FUNC_TRACE();
193 mbuf = sym_op->m_dst;
195 mbuf = sym_op->m_src;
197 /* first FLE entry used to store mbuf and session ctxt */
198 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
199 RTE_CACHE_LINE_SIZE);
200 if (unlikely(!fle)) {
201 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
204 memset(fle, 0, FLE_SG_MEM_SIZE);
205 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
206 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
212 /* Save the shared descriptor */
213 flc = &priv->flc_desc[0].flc;
215 /* Configure FD as a FRAME LIST */
216 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
217 DPAA2_SET_FD_COMPOUND_FMT(fd);
218 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
220 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
221 "iv-len=%d data_off: 0x%x\n",
222 sym_op->aead.data.offset,
223 sym_op->aead.data.length,
226 sym_op->m_src->data_off);
228 /* Configure Output FLE with Scatter/Gather Entry */
229 DPAA2_SET_FLE_SG_EXT(op_fle);
230 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
233 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
235 op_fle->length = (sess->dir == DIR_ENC) ?
236 (sym_op->aead.data.length + icv_len + auth_only_len) :
237 sym_op->aead.data.length + auth_only_len;
239 /* Configure Output SGE for Encap/Decap */
240 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
241 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
242 RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
243 sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
249 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
250 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
251 sge->length = mbuf->data_len;
254 sge->length -= icv_len;
256 if (sess->dir == DIR_ENC) {
258 DPAA2_SET_FLE_ADDR(sge,
259 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
260 sge->length = icv_len;
262 DPAA2_SET_FLE_FIN(sge);
265 mbuf = sym_op->m_src;
267 /* Configure Input FLE with Scatter/Gather Entry */
268 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
269 DPAA2_SET_FLE_SG_EXT(ip_fle);
270 DPAA2_SET_FLE_FIN(ip_fle);
271 ip_fle->length = (sess->dir == DIR_ENC) ?
272 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
273 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
276 /* Configure Input SGE for Encap/Decap */
277 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
278 sge->length = sess->iv.length;
282 DPAA2_SET_FLE_ADDR(sge,
283 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
284 sge->length = auth_only_len;
288 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
289 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
291 sge->length = mbuf->data_len - sym_op->aead.data.offset;
297 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
298 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
299 sge->length = mbuf->data_len;
303 if (sess->dir == DIR_DEC) {
305 old_icv = (uint8_t *)(sge + 1);
306 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
307 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
308 sge->length = icv_len;
311 DPAA2_SET_FLE_FIN(sge);
313 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
314 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
316 DPAA2_SET_FD_LEN(fd, ip_fle->length);
322 build_authenc_gcm_fd(dpaa2_sec_session *sess,
323 struct rte_crypto_op *op,
324 struct qbman_fd *fd, uint16_t bpid)
326 struct rte_crypto_sym_op *sym_op = op->sym;
327 struct ctxt_priv *priv = sess->ctxt;
328 struct qbman_fle *fle, *sge;
329 struct sec_flow_context *flc;
330 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
331 int icv_len = sess->digest_length, retval;
333 struct rte_mbuf *dst;
334 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
337 PMD_INIT_FUNC_TRACE();
344 /* TODO: we use the first FLE entry to store the mbuf and session ctxt.
345 * Currently we do not know which FLE holds the mbuf, so on retrieval
346 * we go back one FLE from the FD address to get the mbuf pointer
347 * from the previous FLE.
348 * A better approach would be to use the inline mbuf.
350 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
352 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
355 memset(fle, 0, FLE_POOL_BUF_SIZE);
356 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
357 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
360 if (likely(bpid < MAX_BPID)) {
361 DPAA2_SET_FD_BPID(fd, bpid);
362 DPAA2_SET_FLE_BPID(fle, bpid);
363 DPAA2_SET_FLE_BPID(fle + 1, bpid);
364 DPAA2_SET_FLE_BPID(sge, bpid);
365 DPAA2_SET_FLE_BPID(sge + 1, bpid);
366 DPAA2_SET_FLE_BPID(sge + 2, bpid);
367 DPAA2_SET_FLE_BPID(sge + 3, bpid);
369 DPAA2_SET_FD_IVP(fd);
370 DPAA2_SET_FLE_IVP(fle);
371 DPAA2_SET_FLE_IVP((fle + 1));
372 DPAA2_SET_FLE_IVP(sge);
373 DPAA2_SET_FLE_IVP((sge + 1));
374 DPAA2_SET_FLE_IVP((sge + 2));
375 DPAA2_SET_FLE_IVP((sge + 3));
378 /* Save the shared descriptor */
379 flc = &priv->flc_desc[0].flc;
380 /* Configure FD as a FRAME LIST */
381 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
382 DPAA2_SET_FD_COMPOUND_FMT(fd);
383 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
385 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
386 "iv-len=%d data_off: 0x%x\n",
387 sym_op->aead.data.offset,
388 sym_op->aead.data.length,
391 sym_op->m_src->data_off);
393 /* Configure Output FLE with Scatter/Gather Entry */
394 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
396 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
397 fle->length = (sess->dir == DIR_ENC) ?
398 (sym_op->aead.data.length + icv_len + auth_only_len) :
399 sym_op->aead.data.length + auth_only_len;
401 DPAA2_SET_FLE_SG_EXT(fle);
403 /* Configure Output SGE for Encap/Decap */
404 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
405 DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
406 RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
407 sge->length = sym_op->aead.data.length + auth_only_len;
409 if (sess->dir == DIR_ENC) {
411 DPAA2_SET_FLE_ADDR(sge,
412 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
413 sge->length = sess->digest_length;
414 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
415 sess->iv.length + auth_only_len));
417 DPAA2_SET_FLE_FIN(sge);
422 /* Configure Input FLE with Scatter/Gather Entry */
423 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
424 DPAA2_SET_FLE_SG_EXT(fle);
425 DPAA2_SET_FLE_FIN(fle);
426 fle->length = (sess->dir == DIR_ENC) ?
427 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
428 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
429 sess->digest_length);
431 /* Configure Input SGE for Encap/Decap */
432 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
433 sge->length = sess->iv.length;
436 DPAA2_SET_FLE_ADDR(sge,
437 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
438 sge->length = auth_only_len;
439 DPAA2_SET_FLE_BPID(sge, bpid);
443 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
444 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
445 sym_op->m_src->data_off);
446 sge->length = sym_op->aead.data.length;
447 if (sess->dir == DIR_DEC) {
449 old_icv = (uint8_t *)(sge + 1);
450 memcpy(old_icv, sym_op->aead.digest.data,
451 sess->digest_length);
452 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
453 sge->length = sess->digest_length;
454 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
455 sess->digest_length +
459 DPAA2_SET_FLE_FIN(sge);
462 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
463 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
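/*
 * Length bookkeeping above, summarized (assuming the IV/AAD/data layout
 * set up by this function):
 *   ENC: input  = IV + AAD + plaintext
 *        output = AAD + ciphertext + ICV
 *   DEC: input  = IV + AAD + ciphertext + ICV (old ICV copied for compare)
 *        output = AAD + plaintext
 */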
470 build_authenc_sg_fd(dpaa2_sec_session *sess,
471 struct rte_crypto_op *op,
472 struct qbman_fd *fd, __rte_unused uint16_t bpid)
474 struct rte_crypto_sym_op *sym_op = op->sym;
475 struct ctxt_priv *priv = sess->ctxt;
476 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
477 struct sec_flow_context *flc;
478 uint32_t auth_only_len = sym_op->auth.data.length -
479 sym_op->cipher.data.length;
480 int icv_len = sess->digest_length;
482 struct rte_mbuf *mbuf;
483 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
486 PMD_INIT_FUNC_TRACE();
489 mbuf = sym_op->m_dst;
491 mbuf = sym_op->m_src;
493 /* first FLE entry used to store mbuf and session ctxt */
494 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
495 RTE_CACHE_LINE_SIZE);
496 if (unlikely(!fle)) {
497 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
500 memset(fle, 0, FLE_SG_MEM_SIZE);
501 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
502 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
508 /* Save the shared descriptor */
509 flc = &priv->flc_desc[0].flc;
511 /* Configure FD as a FRAME LIST */
512 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
513 DPAA2_SET_FD_COMPOUND_FMT(fd);
514 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
517 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
518 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
519 sym_op->auth.data.offset,
520 sym_op->auth.data.length,
522 sym_op->cipher.data.offset,
523 sym_op->cipher.data.length,
525 sym_op->m_src->data_off);
527 /* Configure Output FLE with Scatter/Gather Entry */
528 DPAA2_SET_FLE_SG_EXT(op_fle);
529 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
532 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
534 op_fle->length = (sess->dir == DIR_ENC) ?
535 (sym_op->cipher.data.length + icv_len) :
536 sym_op->cipher.data.length;
538 /* Configure Output SGE for Encap/Decap */
539 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
540 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
541 sge->length = mbuf->data_len - sym_op->auth.data.offset;
547 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
548 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
549 sge->length = mbuf->data_len;
552 sge->length -= icv_len;
554 if (sess->dir == DIR_ENC) {
556 DPAA2_SET_FLE_ADDR(sge,
557 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
558 sge->length = icv_len;
560 DPAA2_SET_FLE_FIN(sge);
563 mbuf = sym_op->m_src;
565 /* Configure Input FLE with Scatter/Gather Entry */
566 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
567 DPAA2_SET_FLE_SG_EXT(ip_fle);
568 DPAA2_SET_FLE_FIN(ip_fle);
569 ip_fle->length = (sess->dir == DIR_ENC) ?
570 (sym_op->auth.data.length + sess->iv.length) :
571 (sym_op->auth.data.length + sess->iv.length +
574 /* Configure Input SGE for Encap/Decap */
575 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
576 sge->length = sess->iv.length;
579 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
580 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
582 sge->length = mbuf->data_len - sym_op->auth.data.offset;
588 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
589 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
590 sge->length = mbuf->data_len;
593 sge->length -= icv_len;
595 if (sess->dir == DIR_DEC) {
597 old_icv = (uint8_t *)(sge + 1);
598 memcpy(old_icv, sym_op->auth.digest.data,
600 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
601 sge->length = icv_len;
604 DPAA2_SET_FLE_FIN(sge);
606 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
607 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
609 DPAA2_SET_FD_LEN(fd, ip_fle->length);
615 build_authenc_fd(dpaa2_sec_session *sess,
616 struct rte_crypto_op *op,
617 struct qbman_fd *fd, uint16_t bpid)
619 struct rte_crypto_sym_op *sym_op = op->sym;
620 struct ctxt_priv *priv = sess->ctxt;
621 struct qbman_fle *fle, *sge;
622 struct sec_flow_context *flc;
623 uint32_t auth_only_len = sym_op->auth.data.length -
624 sym_op->cipher.data.length;
625 int icv_len = sess->digest_length, retval;
627 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
629 struct rte_mbuf *dst;
631 PMD_INIT_FUNC_TRACE();
638 /* We use the first FLE entry to store the mbuf.
639 * Currently we do not know which FLE holds the mbuf, so on retrieval
640 * we go back one FLE from the FD address to get the mbuf pointer
641 * from the previous FLE.
642 * A better approach would be to use the inline mbuf.
644 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
646 DPAA2_SEC_ERR("Memory alloc failed for SGE");
649 memset(fle, 0, FLE_POOL_BUF_SIZE);
650 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
651 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
654 if (likely(bpid < MAX_BPID)) {
655 DPAA2_SET_FD_BPID(fd, bpid);
656 DPAA2_SET_FLE_BPID(fle, bpid);
657 DPAA2_SET_FLE_BPID(fle + 1, bpid);
658 DPAA2_SET_FLE_BPID(sge, bpid);
659 DPAA2_SET_FLE_BPID(sge + 1, bpid);
660 DPAA2_SET_FLE_BPID(sge + 2, bpid);
661 DPAA2_SET_FLE_BPID(sge + 3, bpid);
663 DPAA2_SET_FD_IVP(fd);
664 DPAA2_SET_FLE_IVP(fle);
665 DPAA2_SET_FLE_IVP((fle + 1));
666 DPAA2_SET_FLE_IVP(sge);
667 DPAA2_SET_FLE_IVP((sge + 1));
668 DPAA2_SET_FLE_IVP((sge + 2));
669 DPAA2_SET_FLE_IVP((sge + 3));
672 /* Save the shared descriptor */
673 flc = &priv->flc_desc[0].flc;
674 /* Configure FD as a FRAME LIST */
675 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
676 DPAA2_SET_FD_COMPOUND_FMT(fd);
677 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
680 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
681 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
682 sym_op->auth.data.offset,
683 sym_op->auth.data.length,
685 sym_op->cipher.data.offset,
686 sym_op->cipher.data.length,
688 sym_op->m_src->data_off);
690 /* Configure Output FLE with Scatter/Gather Entry */
691 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
693 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
694 fle->length = (sess->dir == DIR_ENC) ?
695 (sym_op->cipher.data.length + icv_len) :
696 sym_op->cipher.data.length;
698 DPAA2_SET_FLE_SG_EXT(fle);
700 /* Configure Output SGE for Encap/Decap */
701 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
702 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
704 sge->length = sym_op->cipher.data.length;
706 if (sess->dir == DIR_ENC) {
708 DPAA2_SET_FLE_ADDR(sge,
709 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
710 sge->length = sess->digest_length;
711 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
714 DPAA2_SET_FLE_FIN(sge);
719 /* Configure Input FLE with Scatter/Gather Entry */
720 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
721 DPAA2_SET_FLE_SG_EXT(fle);
722 DPAA2_SET_FLE_FIN(fle);
723 fle->length = (sess->dir == DIR_ENC) ?
724 (sym_op->auth.data.length + sess->iv.length) :
725 (sym_op->auth.data.length + sess->iv.length +
726 sess->digest_length);
728 /* Configure Input SGE for Encap/Decap */
729 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
730 sge->length = sess->iv.length;
733 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
734 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
735 sym_op->m_src->data_off);
736 sge->length = sym_op->auth.data.length;
737 if (sess->dir == DIR_DEC) {
739 old_icv = (uint8_t *)(sge + 1);
740 memcpy(old_icv, sym_op->auth.digest.data,
741 sess->digest_length);
742 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
743 sge->length = sess->digest_length;
744 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
745 sess->digest_length +
748 DPAA2_SET_FLE_FIN(sge);
750 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
751 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
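/*
 * Note: auth_only_len (= auth.data.length - cipher.data.length) covers
 * bytes that are authenticated but not ciphered, e.g. protocol headers
 * preceding the encrypted payload in an authenc flow.
 */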
756 static inline int build_auth_sg_fd(
757 dpaa2_sec_session *sess,
758 struct rte_crypto_op *op,
760 __rte_unused uint16_t bpid)
762 struct rte_crypto_sym_op *sym_op = op->sym;
763 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
764 struct sec_flow_context *flc;
765 struct ctxt_priv *priv = sess->ctxt;
767 struct rte_mbuf *mbuf;
769 PMD_INIT_FUNC_TRACE();
771 mbuf = sym_op->m_src;
772 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
773 RTE_CACHE_LINE_SIZE);
774 if (unlikely(!fle)) {
775 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
778 memset(fle, 0, FLE_SG_MEM_SIZE);
779 /* first FLE entry used to store mbuf and session ctxt */
780 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
781 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
786 flc = &priv->flc_desc[DESC_INITFINAL].flc;
788 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
789 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
790 DPAA2_SET_FD_COMPOUND_FMT(fd);
793 DPAA2_SET_FLE_ADDR(op_fle,
794 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
795 op_fle->length = sess->digest_length;
798 DPAA2_SET_FLE_SG_EXT(ip_fle);
799 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
801 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
802 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
803 sge->length = mbuf->data_len - sym_op->auth.data.offset;
809 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
810 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
811 sge->length = mbuf->data_len;
814 if (sess->dir == DIR_ENC) {
815 /* Digest calculation case */
816 sge->length -= sess->digest_length;
817 ip_fle->length = sym_op->auth.data.length;
819 /* Digest verification case */
821 old_digest = (uint8_t *)(sge + 1);
822 rte_memcpy(old_digest, sym_op->auth.digest.data,
823 sess->digest_length);
824 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
825 sge->length = sess->digest_length;
826 ip_fle->length = sym_op->auth.data.length +
829 DPAA2_SET_FLE_FIN(sge);
830 DPAA2_SET_FLE_FIN(ip_fle);
831 DPAA2_SET_FD_LEN(fd, ip_fle->length);
837 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
838 struct qbman_fd *fd, uint16_t bpid)
840 struct rte_crypto_sym_op *sym_op = op->sym;
841 struct qbman_fle *fle, *sge;
842 struct sec_flow_context *flc;
843 struct ctxt_priv *priv = sess->ctxt;
847 PMD_INIT_FUNC_TRACE();
849 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
851 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
854 memset(fle, 0, FLE_POOL_BUF_SIZE);
855 /* TODO: we use the first FLE entry to store the mbuf.
856 * Currently we do not know which FLE holds the mbuf, so on retrieval
857 * we go back one FLE from the FD address to get the mbuf pointer
858 * from the previous FLE.
859 * A better approach would be to use the inline mbuf.
861 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
862 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
865 if (likely(bpid < MAX_BPID)) {
866 DPAA2_SET_FD_BPID(fd, bpid);
867 DPAA2_SET_FLE_BPID(fle, bpid);
868 DPAA2_SET_FLE_BPID(fle + 1, bpid);
870 DPAA2_SET_FD_IVP(fd);
871 DPAA2_SET_FLE_IVP(fle);
872 DPAA2_SET_FLE_IVP((fle + 1));
874 flc = &priv->flc_desc[DESC_INITFINAL].flc;
875 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
877 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
878 fle->length = sess->digest_length;
880 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
881 DPAA2_SET_FD_COMPOUND_FMT(fd);
884 if (sess->dir == DIR_ENC) {
885 DPAA2_SET_FLE_ADDR(fle,
886 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
887 DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
888 sym_op->m_src->data_off);
889 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
890 fle->length = sym_op->auth.data.length;
893 DPAA2_SET_FLE_SG_EXT(fle);
894 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
896 if (likely(bpid < MAX_BPID)) {
897 DPAA2_SET_FLE_BPID(sge, bpid);
898 DPAA2_SET_FLE_BPID(sge + 1, bpid);
900 DPAA2_SET_FLE_IVP(sge);
901 DPAA2_SET_FLE_IVP((sge + 1));
903 DPAA2_SET_FLE_ADDR(sge,
904 DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
905 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
906 sym_op->m_src->data_off);
908 DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
909 sess->digest_length);
910 sge->length = sym_op->auth.data.length;
912 old_digest = (uint8_t *)(sge + 1);
913 rte_memcpy(old_digest, sym_op->auth.digest.data,
914 sess->digest_length);
915 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
916 sge->length = sess->digest_length;
917 fle->length = sym_op->auth.data.length +
919 DPAA2_SET_FLE_FIN(sge);
921 DPAA2_SET_FLE_FIN(fle);
927 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
928 struct qbman_fd *fd, __rte_unused uint16_t bpid)
930 struct rte_crypto_sym_op *sym_op = op->sym;
931 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
932 struct sec_flow_context *flc;
933 struct ctxt_priv *priv = sess->ctxt;
934 struct rte_mbuf *mbuf;
935 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
938 PMD_INIT_FUNC_TRACE();
941 mbuf = sym_op->m_dst;
943 mbuf = sym_op->m_src;
945 fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
946 RTE_CACHE_LINE_SIZE);
948 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
951 memset(fle, 0, FLE_SG_MEM_SIZE);
952 /* first FLE entry used to store mbuf and session ctxt */
953 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
954 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
960 flc = &priv->flc_desc[0].flc;
963 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
965 sym_op->cipher.data.offset,
966 sym_op->cipher.data.length,
968 sym_op->m_src->data_off);
971 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
972 op_fle->length = sym_op->cipher.data.length;
973 DPAA2_SET_FLE_SG_EXT(op_fle);
976 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
977 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
978 sge->length = mbuf->data_len - sym_op->cipher.data.offset;
984 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
985 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
986 sge->length = mbuf->data_len;
989 DPAA2_SET_FLE_FIN(sge);
992 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
993 flc, fle, fle->addr_hi, fle->addr_lo,
997 mbuf = sym_op->m_src;
999 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1000 ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
1001 DPAA2_SET_FLE_SG_EXT(ip_fle);
1004 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1005 DPAA2_SET_FLE_OFFSET(sge, 0);
1006 sge->length = sess->iv.length;
1011 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1012 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
1014 sge->length = mbuf->data_len - sym_op->cipher.data.offset;
1020 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1021 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1022 sge->length = mbuf->data_len;
1025 DPAA2_SET_FLE_FIN(sge);
1026 DPAA2_SET_FLE_FIN(ip_fle);
1029 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1030 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1031 DPAA2_SET_FD_COMPOUND_FMT(fd);
1032 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1035 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1036 " off =%d, len =%d\n",
1037 DPAA2_GET_FD_ADDR(fd),
1038 DPAA2_GET_FD_BPID(fd),
1039 rte_dpaa2_bpid_info[bpid].meta_data_size,
1040 DPAA2_GET_FD_OFFSET(fd),
1041 DPAA2_GET_FD_LEN(fd));
1046 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1047 struct qbman_fd *fd, uint16_t bpid)
1049 struct rte_crypto_sym_op *sym_op = op->sym;
1050 struct qbman_fle *fle, *sge;
1052 struct sec_flow_context *flc;
1053 struct ctxt_priv *priv = sess->ctxt;
1054 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1056 struct rte_mbuf *dst;
1058 PMD_INIT_FUNC_TRACE();
1061 dst = sym_op->m_dst;
1063 dst = sym_op->m_src;
1065 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1067 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1070 memset(fle, 0, FLE_POOL_BUF_SIZE);
1071 /* TODO: we use the first FLE entry to store the mbuf.
1072 * Currently we do not know which FLE holds the mbuf, so on retrieval
1073 * we go back one FLE from the FD address to get the mbuf pointer
1074 * from the previous FLE.
1075 * A better approach would be to use the inline mbuf.
1077 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1078 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1082 if (likely(bpid < MAX_BPID)) {
1083 DPAA2_SET_FD_BPID(fd, bpid);
1084 DPAA2_SET_FLE_BPID(fle, bpid);
1085 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1086 DPAA2_SET_FLE_BPID(sge, bpid);
1087 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1089 DPAA2_SET_FD_IVP(fd);
1090 DPAA2_SET_FLE_IVP(fle);
1091 DPAA2_SET_FLE_IVP((fle + 1));
1092 DPAA2_SET_FLE_IVP(sge);
1093 DPAA2_SET_FLE_IVP((sge + 1));
1096 flc = &priv->flc_desc[0].flc;
1097 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1098 DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
1100 DPAA2_SET_FD_COMPOUND_FMT(fd);
1101 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1104 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1105 " data_off: 0x%x\n",
1106 sym_op->cipher.data.offset,
1107 sym_op->cipher.data.length,
1109 sym_op->m_src->data_off);
1111 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1112 DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
1115 fle->length = sym_op->cipher.data.length + sess->iv.length;
1118 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1119 flc, fle, fle->addr_hi, fle->addr_lo,
1124 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1125 fle->length = sym_op->cipher.data.length + sess->iv.length;
1127 DPAA2_SET_FLE_SG_EXT(fle);
1129 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1130 sge->length = sess->iv.length;
1133 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1134 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
1135 sym_op->m_src->data_off);
1137 sge->length = sym_op->cipher.data.length;
1138 DPAA2_SET_FLE_FIN(sge);
1139 DPAA2_SET_FLE_FIN(fle);
1142 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1143 " off =%d, len =%d\n",
1144 DPAA2_GET_FD_ADDR(fd),
1145 DPAA2_GET_FD_BPID(fd),
1146 rte_dpaa2_bpid_info[bpid].meta_data_size,
1147 DPAA2_GET_FD_OFFSET(fd),
1148 DPAA2_GET_FD_LEN(fd));
1154 build_sec_fd(struct rte_crypto_op *op,
1155 struct qbman_fd *fd, uint16_t bpid)
1158 dpaa2_sec_session *sess;
1160 PMD_INIT_FUNC_TRACE();
1162 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1163 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1164 op->sym->session, cryptodev_driver_id);
1165 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1166 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1167 op->sym->sec_session);
1171 /* Segmented buffer */
1172 if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1173 switch (sess->ctxt_type) {
1174 case DPAA2_SEC_CIPHER:
1175 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1177 case DPAA2_SEC_AUTH:
1178 ret = build_auth_sg_fd(sess, op, fd, bpid);
1180 case DPAA2_SEC_AEAD:
1181 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1183 case DPAA2_SEC_CIPHER_HASH:
1184 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1186 case DPAA2_SEC_HASH_CIPHER:
1188 DPAA2_SEC_ERR("error: Unsupported session");
1191 switch (sess->ctxt_type) {
1192 case DPAA2_SEC_CIPHER:
1193 ret = build_cipher_fd(sess, op, fd, bpid);
1195 case DPAA2_SEC_AUTH:
1196 ret = build_auth_fd(sess, op, fd, bpid);
1198 case DPAA2_SEC_AEAD:
1199 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1201 case DPAA2_SEC_CIPHER_HASH:
1202 ret = build_authenc_fd(sess, op, fd, bpid);
1204 case DPAA2_SEC_IPSEC:
1205 ret = build_proto_fd(sess, op, fd, bpid);
1207 case DPAA2_SEC_PDCP:
1208 ret = build_proto_compound_fd(sess, op, fd, bpid);
1210 case DPAA2_SEC_HASH_CIPHER:
1212 DPAA2_SEC_ERR("error: Unsupported session");
1219 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1222 /* Function to transmit frames to a given device and VQ */
1225 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1226 uint32_t frames_to_send;
1227 struct qbman_eq_desc eqdesc;
1228 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1229 struct qbman_swp *swp;
1230 uint16_t num_tx = 0;
1231 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1232 /* TODO: need to support multiple buffer pools */
1234 struct rte_mempool *mb_pool;
1236 if (unlikely(nb_ops == 0))
1239 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1240 DPAA2_SEC_ERR("sessionless crypto op not supported");
1243 /* Prepare enqueue descriptor */
1244 qbman_eq_desc_clear(&eqdesc);
1245 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1246 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1247 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1249 if (!DPAA2_PER_LCORE_DPIO) {
1250 ret = dpaa2_affine_qbman_swp();
1252 DPAA2_SEC_ERR("Failure in affining portal");
1256 swp = DPAA2_PER_LCORE_PORTAL;
1259 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1260 dpaa2_eqcr_size : nb_ops;
1262 for (loop = 0; loop < frames_to_send; loop++) {
1263 if ((*ops)->sym->m_src->seqn) {
1264 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1266 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1267 DPAA2_PER_LCORE_DQRR_SIZE--;
1268 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1269 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1272 /* Clear the unused FD fields before sending */
1273 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1274 mb_pool = (*ops)->sym->m_src->pool;
1275 bpid = mempool_to_bpid(mb_pool);
1276 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1278 DPAA2_SEC_ERR("error: Improper packet contents"
1279 " for crypto operation");
1285 while (loop < frames_to_send) {
1286 loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1289 frames_to_send - loop);
1292 num_tx += frames_to_send;
1293 nb_ops -= frames_to_send;
1296 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1297 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1301 static inline struct rte_crypto_op *
1302 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1304 struct rte_crypto_op *op;
1305 uint16_t len = DPAA2_GET_FD_LEN(fd);
1307 dpaa2_sec_session *sess_priv;
1309 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1310 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1311 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1313 diff = len - mbuf->pkt_len;
1314 mbuf->pkt_len += diff;
1315 mbuf->data_len += diff;
1316 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1317 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1318 op->sym->aead.digest.phys_addr = 0L;
1320 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1321 op->sym->sec_session);
1322 if (sess_priv->dir == DIR_ENC)
1323 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1325 mbuf->data_off += SEC_FLC_DHR_INBOUND;
1330 static inline struct rte_crypto_op *
1331 sec_fd_to_mbuf(const struct qbman_fd *fd)
1333 struct qbman_fle *fle;
1334 struct rte_crypto_op *op;
1335 struct ctxt_priv *priv;
1336 struct rte_mbuf *dst, *src;
1338 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1339 return sec_simple_fd_to_mbuf(fd);
1341 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1343 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1344 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1346 /* We use the first FLE entry to store the mbuf.
1347 * Currently we do not know which FLE holds the mbuf, so on retrieval
1348 * we go back one FLE from the FD address to get the mbuf pointer
1349 * from the previous FLE.
1350 * A better approach would be to use the inline mbuf.
1353 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1354 /* TODO complete it. */
1355 DPAA2_SEC_ERR("error: non inline buffer");
1358 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1361 src = op->sym->m_src;
1364 if (op->sym->m_dst) {
1365 dst = op->sym->m_dst;
1370 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1371 dpaa2_sec_session *sess = (dpaa2_sec_session *)
1372 get_sec_session_private_data(op->sym->sec_session);
1373 if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
1374 uint16_t len = DPAA2_GET_FD_LEN(fd);
1376 dst->data_len = len;
1380 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1381 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1384 DPAA2_GET_FD_ADDR(fd),
1385 DPAA2_GET_FD_BPID(fd),
1386 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1387 DPAA2_GET_FD_OFFSET(fd),
1388 DPAA2_GET_FD_LEN(fd));
1390 /* free the fle memory */
1391 if (likely(rte_pktmbuf_is_contiguous(src))) {
1392 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1393 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1395 rte_free((void *)(fle-1));
1401 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1404 /* Function to receive frames for a given device and VQ */
1405 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1406 struct qbman_result *dq_storage;
1407 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1408 int ret, num_rx = 0;
1409 uint8_t is_last = 0, status;
1410 struct qbman_swp *swp;
1411 const struct qbman_fd *fd;
1412 struct qbman_pull_desc pulldesc;
1414 if (!DPAA2_PER_LCORE_DPIO) {
1415 ret = dpaa2_affine_qbman_swp();
1417 DPAA2_SEC_ERR("Failure in affining portal");
1421 swp = DPAA2_PER_LCORE_PORTAL;
1422 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1424 qbman_pull_desc_clear(&pulldesc);
1425 qbman_pull_desc_set_numframes(&pulldesc,
1426 (nb_ops > dpaa2_dqrr_size) ?
1427 dpaa2_dqrr_size : nb_ops);
1428 qbman_pull_desc_set_fq(&pulldesc, fqid);
1429 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1430 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1433 /* Issue a volatile dequeue command. */
1435 if (qbman_swp_pull(swp, &pulldesc)) {
1437 "SEC VDQ command is not issued : QBMAN busy");
1438 /* Portal was busy, try again */
1444 /* Receive packets until the Last Dequeue entry is found with
1445 * respect to the PULL command issued above.
1448 /* Check if the previously issued command is completed.
1449 * Also, the SWP appears to be shared between the Ethernet
1450 * driver and the SEC driver.
1452 while (!qbman_check_command_complete(dq_storage))
1455 /* Loop until dq_storage is updated with
1456 * a new token by QBMAN
1458 while (!qbman_check_new_result(dq_storage))
1460 /* Check whether the last pull command has expired and
1461 * set the condition for loop termination
1463 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1465 /* Check for valid frame. */
1466 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1468 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1469 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1474 fd = qbman_result_DQ_fd(dq_storage);
1475 ops[num_rx] = sec_fd_to_mbuf(fd);
1477 if (unlikely(fd->simple.frc)) {
1478 /* TODO Parse SEC errors */
1479 DPAA2_SEC_ERR("SEC returned Error - %x",
1481 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1483 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1488 } /* End of Packet Rx loop */
1490 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1492 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1493 /* Return the total number of packets received to the DPAA2 app */
1497 /** Release queue pair */
1499 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1501 struct dpaa2_sec_qp *qp =
1502 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1504 PMD_INIT_FUNC_TRACE();
1506 if (qp->rx_vq.q_storage) {
1507 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1508 rte_free(qp->rx_vq.q_storage);
1512 dev->data->queue_pairs[queue_pair_id] = NULL;
1517 /** Setup a queue pair */
1519 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1520 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1521 __rte_unused int socket_id)
1523 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1524 struct dpaa2_sec_qp *qp;
1525 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1526 struct dpseci_rx_queue_cfg cfg;
1529 PMD_INIT_FUNC_TRACE();
1531 /* If qp is already in use, free ring memory and qp metadata. */
1532 if (dev->data->queue_pairs[qp_id] != NULL) {
1533 DPAA2_SEC_INFO("QP already setup");
1537 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1538 dev, qp_id, qp_conf);
1540 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1542 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1543 RTE_CACHE_LINE_SIZE);
1545 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1549 qp->rx_vq.crypto_data = dev->data;
1550 qp->tx_vq.crypto_data = dev->data;
1551 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1552 sizeof(struct queue_storage_info_t),
1553 RTE_CACHE_LINE_SIZE);
1554 if (!qp->rx_vq.q_storage) {
1555 DPAA2_SEC_ERR("malloc failed for q_storage");
1558 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1560 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1561 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1565 dev->data->queue_pairs[qp_id] = qp;
1567 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1568 cfg.user_ctx = (size_t)(&qp->rx_vq);
1569 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1574 /** Return the number of allocated queue pairs */
1576 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1578 PMD_INIT_FUNC_TRACE();
1580 return dev->data->nb_queue_pairs;
1583 /** Returns the size of the DPAA2 SEC session structure */
1585 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1587 PMD_INIT_FUNC_TRACE();
1589 return sizeof(dpaa2_sec_session);
1593 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1594 struct rte_crypto_sym_xform *xform,
1595 dpaa2_sec_session *session)
1597 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1598 struct alginfo cipherdata;
1600 struct ctxt_priv *priv;
1601 struct sec_flow_context *flc;
1603 PMD_INIT_FUNC_TRACE();
1605 /* For SEC CIPHER only one descriptor is required. */
1606 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1607 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1608 RTE_CACHE_LINE_SIZE);
1610 DPAA2_SEC_ERR("No Memory for priv CTXT");
1614 priv->fle_pool = dev_priv->fle_pool;
1616 flc = &priv->flc_desc[0].flc;
1618 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1619 RTE_CACHE_LINE_SIZE);
1620 if (session->cipher_key.data == NULL) {
1621 DPAA2_SEC_ERR("No Memory for cipher key");
1625 session->cipher_key.length = xform->cipher.key.length;
1627 memcpy(session->cipher_key.data, xform->cipher.key.data,
1628 xform->cipher.key.length);
1629 cipherdata.key = (size_t)session->cipher_key.data;
1630 cipherdata.keylen = session->cipher_key.length;
1631 cipherdata.key_enc_flags = 0;
1632 cipherdata.key_type = RTA_DATA_IMM;
1634 /* Set IV parameters */
1635 session->iv.offset = xform->cipher.iv.offset;
1636 session->iv.length = xform->cipher.iv.length;
1638 switch (xform->cipher.algo) {
1639 case RTE_CRYPTO_CIPHER_AES_CBC:
1640 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1641 cipherdata.algmode = OP_ALG_AAI_CBC;
1642 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1644 case RTE_CRYPTO_CIPHER_3DES_CBC:
1645 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1646 cipherdata.algmode = OP_ALG_AAI_CBC;
1647 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1649 case RTE_CRYPTO_CIPHER_AES_CTR:
1650 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1651 cipherdata.algmode = OP_ALG_AAI_CTR;
1652 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1654 case RTE_CRYPTO_CIPHER_3DES_CTR:
1655 case RTE_CRYPTO_CIPHER_AES_ECB:
1656 case RTE_CRYPTO_CIPHER_3DES_ECB:
1657 case RTE_CRYPTO_CIPHER_AES_XTS:
1658 case RTE_CRYPTO_CIPHER_AES_F8:
1659 case RTE_CRYPTO_CIPHER_ARC4:
1660 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1661 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1662 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1663 case RTE_CRYPTO_CIPHER_NULL:
1664 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1665 xform->cipher.algo);
1668 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1669 xform->cipher.algo);
1672 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1675 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1676 &cipherdata, NULL, session->iv.length,
1679 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1683 flc->word1_sdl = (uint8_t)bufsize;
1684 session->ctxt = priv;
1686 for (i = 0; i < bufsize; i++)
1687 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1692 rte_free(session->cipher_key.data);
1698 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1699 struct rte_crypto_sym_xform *xform,
1700 dpaa2_sec_session *session)
1702 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1703 struct alginfo authdata;
1705 struct ctxt_priv *priv;
1706 struct sec_flow_context *flc;
1708 PMD_INIT_FUNC_TRACE();
1710 /* For SEC AUTH three descriptors are required for various stages */
1711 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1712 sizeof(struct ctxt_priv) + 3 *
1713 sizeof(struct sec_flc_desc),
1714 RTE_CACHE_LINE_SIZE);
1716 DPAA2_SEC_ERR("No Memory for priv CTXT");
1720 priv->fle_pool = dev_priv->fle_pool;
1721 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1723 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1724 RTE_CACHE_LINE_SIZE);
1725 if (session->auth_key.data == NULL) {
1726 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1730 session->auth_key.length = xform->auth.key.length;
1732 memcpy(session->auth_key.data, xform->auth.key.data,
1733 xform->auth.key.length);
1734 authdata.key = (size_t)session->auth_key.data;
1735 authdata.keylen = session->auth_key.length;
1736 authdata.key_enc_flags = 0;
1737 authdata.key_type = RTA_DATA_IMM;
1739 session->digest_length = xform->auth.digest_length;
1741 switch (xform->auth.algo) {
1742 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1743 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1744 authdata.algmode = OP_ALG_AAI_HMAC;
1745 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1747 case RTE_CRYPTO_AUTH_MD5_HMAC:
1748 authdata.algtype = OP_ALG_ALGSEL_MD5;
1749 authdata.algmode = OP_ALG_AAI_HMAC;
1750 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1752 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1753 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1754 authdata.algmode = OP_ALG_AAI_HMAC;
1755 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1757 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1758 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1759 authdata.algmode = OP_ALG_AAI_HMAC;
1760 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1762 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1763 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1764 authdata.algmode = OP_ALG_AAI_HMAC;
1765 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1767 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1768 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1769 authdata.algmode = OP_ALG_AAI_HMAC;
1770 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1772 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1773 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1774 case RTE_CRYPTO_AUTH_NULL:
1775 case RTE_CRYPTO_AUTH_SHA1:
1776 case RTE_CRYPTO_AUTH_SHA256:
1777 case RTE_CRYPTO_AUTH_SHA512:
1778 case RTE_CRYPTO_AUTH_SHA224:
1779 case RTE_CRYPTO_AUTH_SHA384:
1780 case RTE_CRYPTO_AUTH_MD5:
1781 case RTE_CRYPTO_AUTH_AES_GMAC:
1782 case RTE_CRYPTO_AUTH_KASUMI_F9:
1783 case RTE_CRYPTO_AUTH_AES_CMAC:
1784 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1785 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1786 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1790 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1794 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1797 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1798 1, 0, SHR_NEVER, &authdata, !session->dir,
1799 session->digest_length);
1801 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1805 flc->word1_sdl = (uint8_t)bufsize;
1806 session->ctxt = priv;
1807 for (i = 0; i < bufsize; i++)
1808 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1809 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1815 rte_free(session->auth_key.data);
1821 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1822 struct rte_crypto_sym_xform *xform,
1823 dpaa2_sec_session *session)
1825 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1826 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1827 struct alginfo aeaddata;
1829 struct ctxt_priv *priv;
1830 struct sec_flow_context *flc;
1831 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1834 PMD_INIT_FUNC_TRACE();
1836 /* Set IV parameters */
1837 session->iv.offset = aead_xform->iv.offset;
1838 session->iv.length = aead_xform->iv.length;
1839 session->ctxt_type = DPAA2_SEC_AEAD;
1841 /* For SEC AEAD only one descriptor is required */
1842 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1843 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1844 RTE_CACHE_LINE_SIZE);
1846 DPAA2_SEC_ERR("No Memory for priv CTXT");
1850 priv->fle_pool = dev_priv->fle_pool;
1851 flc = &priv->flc_desc[0].flc;
1853 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1854 RTE_CACHE_LINE_SIZE);
1855 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1856 DPAA2_SEC_ERR("No Memory for aead key");
1860 memcpy(session->aead_key.data, aead_xform->key.data,
1861 aead_xform->key.length);
1863 session->digest_length = aead_xform->digest_length;
1864 session->aead_key.length = aead_xform->key.length;
1865 ctxt->auth_only_len = aead_xform->aad_length;
1867 aeaddata.key = (size_t)session->aead_key.data;
1868 aeaddata.keylen = session->aead_key.length;
1869 aeaddata.key_enc_flags = 0;
1870 aeaddata.key_type = RTA_DATA_IMM;
1872 switch (aead_xform->algo) {
1873 case RTE_CRYPTO_AEAD_AES_GCM:
1874 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1875 aeaddata.algmode = OP_ALG_AAI_GCM;
1876 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1878 case RTE_CRYPTO_AEAD_AES_CCM:
1879 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1883 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1887 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1890 priv->flc_desc[0].desc[0] = aeaddata.keylen;
1891 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1893 (unsigned int *)priv->flc_desc[0].desc,
1894 &priv->flc_desc[0].desc[1], 1);
1897 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1900 if (priv->flc_desc[0].desc[1] & 1) {
1901 aeaddata.key_type = RTA_DATA_IMM;
1903 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1904 aeaddata.key_type = RTA_DATA_PTR;
1906 priv->flc_desc[0].desc[0] = 0;
1907 priv->flc_desc[0].desc[1] = 0;
1909 if (session->dir == DIR_ENC)
1910 bufsize = cnstr_shdsc_gcm_encap(
1911 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1912 &aeaddata, session->iv.length,
1913 session->digest_length);
1915 bufsize = cnstr_shdsc_gcm_decap(
1916 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1917 &aeaddata, session->iv.length,
1918 session->digest_length);
1920 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1924 flc->word1_sdl = (uint8_t)bufsize;
1925 session->ctxt = priv;
1926 for (i = 0; i < bufsize; i++)
1927 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1928 i, priv->flc_desc[0].desc[i]);
1933 rte_free(session->aead_key.data);
1940 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1941 struct rte_crypto_sym_xform *xform,
1942 dpaa2_sec_session *session)
1944 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1945 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1946 struct alginfo authdata, cipherdata;
1948 struct ctxt_priv *priv;
1949 struct sec_flow_context *flc;
1950 struct rte_crypto_cipher_xform *cipher_xform;
1951 struct rte_crypto_auth_xform *auth_xform;
1954 PMD_INIT_FUNC_TRACE();
1956 if (session->ext_params.aead_ctxt.auth_cipher_text) {
1957 cipher_xform = &xform->cipher;
1958 auth_xform = &xform->next->auth;
1959 session->ctxt_type =
1960 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1961 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1963 cipher_xform = &xform->next->cipher;
1964 auth_xform = &xform->auth;
1965 session->ctxt_type =
1966 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1967 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1970 /* Set IV parameters */
1971 session->iv.offset = cipher_xform->iv.offset;
1972 session->iv.length = cipher_xform->iv.length;
1974 /* For SEC cipher + auth (authenc) only one descriptor is required */
1975 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1976 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1977 RTE_CACHE_LINE_SIZE);
1979 DPAA2_SEC_ERR("No Memory for priv CTXT");
1983 priv->fle_pool = dev_priv->fle_pool;
1984 flc = &priv->flc_desc[0].flc;
1986 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1987 RTE_CACHE_LINE_SIZE);
1988 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1989 DPAA2_SEC_ERR("No Memory for cipher key");
1993 session->cipher_key.length = cipher_xform->key.length;
1994 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1995 RTE_CACHE_LINE_SIZE);
1996 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1997 DPAA2_SEC_ERR("No Memory for auth key");
1998 rte_free(session->cipher_key.data);
2002 session->auth_key.length = auth_xform->key.length;
2003 memcpy(session->cipher_key.data, cipher_xform->key.data,
2004 cipher_xform->key.length);
2005 memcpy(session->auth_key.data, auth_xform->key.data,
2006 auth_xform->key.length);
2008 authdata.key = (size_t)session->auth_key.data;
2009 authdata.keylen = session->auth_key.length;
2010 authdata.key_enc_flags = 0;
2011 authdata.key_type = RTA_DATA_IMM;
2013 session->digest_length = auth_xform->digest_length;
2015 switch (auth_xform->algo) {
2016 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2017 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2018 authdata.algmode = OP_ALG_AAI_HMAC;
2019 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2021 case RTE_CRYPTO_AUTH_MD5_HMAC:
2022 authdata.algtype = OP_ALG_ALGSEL_MD5;
2023 authdata.algmode = OP_ALG_AAI_HMAC;
2024 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2026 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2027 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2028 authdata.algmode = OP_ALG_AAI_HMAC;
2029 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2031 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2032 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2033 authdata.algmode = OP_ALG_AAI_HMAC;
2034 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2036 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2037 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2038 authdata.algmode = OP_ALG_AAI_HMAC;
2039 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2041 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2042 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2043 authdata.algmode = OP_ALG_AAI_HMAC;
2044 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2046 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2047 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2048 case RTE_CRYPTO_AUTH_NULL:
2049 case RTE_CRYPTO_AUTH_SHA1:
2050 case RTE_CRYPTO_AUTH_SHA256:
2051 case RTE_CRYPTO_AUTH_SHA512:
2052 case RTE_CRYPTO_AUTH_SHA224:
2053 case RTE_CRYPTO_AUTH_SHA384:
2054 case RTE_CRYPTO_AUTH_MD5:
2055 case RTE_CRYPTO_AUTH_AES_GMAC:
2056 case RTE_CRYPTO_AUTH_KASUMI_F9:
2057 case RTE_CRYPTO_AUTH_AES_CMAC:
2058 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2059 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2060 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2064 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2068 cipherdata.key = (size_t)session->cipher_key.data;
2069 cipherdata.keylen = session->cipher_key.length;
2070 cipherdata.key_enc_flags = 0;
2071 cipherdata.key_type = RTA_DATA_IMM;
2073 switch (cipher_xform->algo) {
2074 case RTE_CRYPTO_CIPHER_AES_CBC:
2075 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2076 cipherdata.algmode = OP_ALG_AAI_CBC;
2077 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2079 case RTE_CRYPTO_CIPHER_3DES_CBC:
2080 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2081 cipherdata.algmode = OP_ALG_AAI_CBC;
2082 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2084 case RTE_CRYPTO_CIPHER_AES_CTR:
2085 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2086 cipherdata.algmode = OP_ALG_AAI_CTR;
2087 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2089 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2090 case RTE_CRYPTO_CIPHER_NULL:
2091 case RTE_CRYPTO_CIPHER_3DES_ECB:
2092 case RTE_CRYPTO_CIPHER_AES_ECB:
2093 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2094 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2095 cipher_xform->algo);
2098 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2099 cipher_xform->algo);
2102 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2105 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2106 priv->flc_desc[0].desc[1] = authdata.keylen;
2107 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2109 (unsigned int *)priv->flc_desc[0].desc,
2110 &priv->flc_desc[0].desc[2], 2);
2113 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2116 if (priv->flc_desc[0].desc[2] & 1) {
2117 cipherdata.key_type = RTA_DATA_IMM;
2119 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2120 cipherdata.key_type = RTA_DATA_PTR;
2122 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2123 authdata.key_type = RTA_DATA_IMM;
2125 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2126 authdata.key_type = RTA_DATA_PTR;
2128 priv->flc_desc[0].desc[0] = 0;
2129 priv->flc_desc[0].desc[1] = 0;
2130 priv->flc_desc[0].desc[2] = 0;
2132 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2133 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2134 0, SHR_SERIAL,
2135 &cipherdata, &authdata,
2136 session->iv.length,
2137 ctxt->auth_only_len,
2138 session->digest_length,
2139 session->dir);
2140 if (bufsize < 0) {
2141 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2142 goto error_out;
2143 }
2144 } else {
2145 DPAA2_SEC_ERR("Hash before cipher not supported");
2146 goto error_out;
2147 }
2149 flc->word1_sdl = (uint8_t)bufsize;
2150 session->ctxt = priv;
2151 for (i = 0; i < bufsize; i++)
2152 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2153 i, priv->flc_desc[0].desc[i]);
2155 return 0;
2157 error_out:
2158 rte_free(session->cipher_key.data);
2159 rte_free(session->auth_key.data);
2160 return -1;
2161 }
2164 static int
2165 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2166 struct rte_crypto_sym_xform *xform, void *sess)
2167 {
2168 dpaa2_sec_session *session = sess;
2169 int ret;
2171 PMD_INIT_FUNC_TRACE();
2173 if (unlikely(sess == NULL)) {
2174 DPAA2_SEC_ERR("Invalid session struct");
2175 return -EINVAL;
2176 }
2178 memset(session, 0, sizeof(dpaa2_sec_session));
2179 /* Default IV length = 0 */
2180 session->iv.length = 0;
2182 /* Cipher Only */
2183 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2184 session->ctxt_type = DPAA2_SEC_CIPHER;
2185 ret = dpaa2_sec_cipher_init(dev, xform, session);
2187 /* Authentication Only */
2188 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2189 xform->next == NULL) {
2190 session->ctxt_type = DPAA2_SEC_AUTH;
2191 ret = dpaa2_sec_auth_init(dev, xform, session);
2193 /* Cipher then Authenticate */
2194 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2195 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2196 session->ext_params.aead_ctxt.auth_cipher_text = true;
2197 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2199 /* Authenticate then Cipher */
2200 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2201 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2202 session->ext_params.aead_ctxt.auth_cipher_text = false;
2203 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2205 /* AEAD operation for AES-GCM kind of Algorithms */
2206 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2207 xform->next == NULL) {
2208 ret = dpaa2_sec_aead_init(dev, xform, session);
2211 DPAA2_SEC_ERR("Invalid crypto type");
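/*
 * Illustrative only (not part of the driver): how an application might
 * express the "cipher then authenticate" chain dispatched above. The
 * function, key buffers and lengths are hypothetical placeholders; a real
 * caller would size keys/IV/digest for the chosen algorithms.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static void
example_build_cipher_auth_chain(struct rte_crypto_sym_xform *cipher,
		struct rte_crypto_sym_xform *auth,
		uint8_t *cipher_key, uint8_t *auth_key)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.key.data = cipher_key;
	cipher->cipher.key.length = 16;
	/* IV conventionally lives right after the crypto op */
	cipher->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				   sizeof(struct rte_crypto_sym_op);
	cipher->cipher.iv.length = 16;
	/* cipher followed by auth selects DPAA2_SEC_CIPHER_HASH above */
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.key.data = auth_key;
	auth->auth.key.length = 20;
	auth->auth.digest_length = 12;
	auth->next = NULL;
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */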
2218 static int
2219 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2220 dpaa2_sec_session *session,
2221 struct alginfo *aeaddata)
2222 {
2223 PMD_INIT_FUNC_TRACE();
2225 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2226 RTE_CACHE_LINE_SIZE);
2227 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2228 DPAA2_SEC_ERR("No Memory for aead key");
2231 memcpy(session->aead_key.data, aead_xform->key.data,
2232 aead_xform->key.length);
2234 session->digest_length = aead_xform->digest_length;
2235 session->aead_key.length = aead_xform->key.length;
2237 aeaddata->key = (size_t)session->aead_key.data;
2238 aeaddata->keylen = session->aead_key.length;
2239 aeaddata->key_enc_flags = 0;
2240 aeaddata->key_type = RTA_DATA_IMM;
2242 switch (aead_xform->algo) {
2243 case RTE_CRYPTO_AEAD_AES_GCM:
2244 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2245 aeaddata->algmode = OP_ALG_AAI_GCM;
2246 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2247 break;
2248 case RTE_CRYPTO_AEAD_AES_CCM:
2249 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2250 aeaddata->algmode = OP_ALG_AAI_CCM;
2251 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2252 break;
2253 default:
2254 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2255 aead_xform->algo);
2256 return -1;
2257 }
2258 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2259 DIR_ENC : DIR_DEC;
2261 return 0;
2262 }
2264 static int
2265 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2266 struct rte_crypto_auth_xform *auth_xform,
2267 dpaa2_sec_session *session,
2268 struct alginfo *cipherdata,
2269 struct alginfo *authdata)
2270 {
2271 if (cipher_xform) {
2272 session->cipher_key.data = rte_zmalloc(NULL,
2273 cipher_xform->key.length,
2274 RTE_CACHE_LINE_SIZE);
2275 if (session->cipher_key.data == NULL &&
2276 cipher_xform->key.length > 0) {
2277 DPAA2_SEC_ERR("No Memory for cipher key");
2281 session->cipher_key.length = cipher_xform->key.length;
2282 memcpy(session->cipher_key.data, cipher_xform->key.data,
2283 cipher_xform->key.length);
2284 session->cipher_alg = cipher_xform->algo;
2285 } else {
2286 session->cipher_key.data = NULL;
2287 session->cipher_key.length = 0;
2288 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2289 }
2291 if (auth_xform) {
2292 session->auth_key.data = rte_zmalloc(NULL,
2293 auth_xform->key.length,
2294 RTE_CACHE_LINE_SIZE);
2295 if (session->auth_key.data == NULL &&
2296 auth_xform->key.length > 0) {
2297 DPAA2_SEC_ERR("No Memory for auth key");
2300 session->auth_key.length = auth_xform->key.length;
2301 memcpy(session->auth_key.data, auth_xform->key.data,
2302 auth_xform->key.length);
2303 session->auth_alg = auth_xform->algo;
2304 } else {
2305 session->auth_key.data = NULL;
2306 session->auth_key.length = 0;
2307 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2308 }
2310 authdata->key = (size_t)session->auth_key.data;
2311 authdata->keylen = session->auth_key.length;
2312 authdata->key_enc_flags = 0;
2313 authdata->key_type = RTA_DATA_IMM;
2314 switch (session->auth_alg) {
2315 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2316 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2317 authdata->algmode = OP_ALG_AAI_HMAC;
2318 break;
2319 case RTE_CRYPTO_AUTH_MD5_HMAC:
2320 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2321 authdata->algmode = OP_ALG_AAI_HMAC;
2322 break;
2323 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2324 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2325 authdata->algmode = OP_ALG_AAI_HMAC;
2326 break;
2327 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2328 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2329 authdata->algmode = OP_ALG_AAI_HMAC;
2330 break;
2331 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2332 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2333 authdata->algmode = OP_ALG_AAI_HMAC;
2334 break;
2335 case RTE_CRYPTO_AUTH_AES_CMAC:
2336 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2337 break;
2338 case RTE_CRYPTO_AUTH_NULL:
2339 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2340 break;
2341 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2342 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2343 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2344 case RTE_CRYPTO_AUTH_SHA1:
2345 case RTE_CRYPTO_AUTH_SHA256:
2346 case RTE_CRYPTO_AUTH_SHA512:
2347 case RTE_CRYPTO_AUTH_SHA224:
2348 case RTE_CRYPTO_AUTH_SHA384:
2349 case RTE_CRYPTO_AUTH_MD5:
2350 case RTE_CRYPTO_AUTH_AES_GMAC:
2351 case RTE_CRYPTO_AUTH_KASUMI_F9:
2352 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2353 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2354 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2355 session->auth_alg);
2356 return -ENOTSUP;
2357 default:
2358 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2359 session->auth_alg);
2360 return -ENOTSUP;
2361 }
2362 cipherdata->key = (size_t)session->cipher_key.data;
2363 cipherdata->keylen = session->cipher_key.length;
2364 cipherdata->key_enc_flags = 0;
2365 cipherdata->key_type = RTA_DATA_IMM;
2367 switch (session->cipher_alg) {
2368 case RTE_CRYPTO_CIPHER_AES_CBC:
2369 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2370 cipherdata->algmode = OP_ALG_AAI_CBC;
2371 break;
2372 case RTE_CRYPTO_CIPHER_3DES_CBC:
2373 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2374 cipherdata->algmode = OP_ALG_AAI_CBC;
2375 break;
2376 case RTE_CRYPTO_CIPHER_AES_CTR:
2377 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2378 cipherdata->algmode = OP_ALG_AAI_CTR;
2379 break;
2380 case RTE_CRYPTO_CIPHER_NULL:
2381 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2382 break;
2383 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2384 case RTE_CRYPTO_CIPHER_3DES_ECB:
2385 case RTE_CRYPTO_CIPHER_AES_ECB:
2386 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2387 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2388 session->cipher_alg);
2389 return -ENOTSUP;
2390 default:
2391 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2392 session->cipher_alg);
2393 return -ENOTSUP;
2394 }
2396 return 0;
2397 }
2399 #ifdef RTE_LIBRTE_SECURITY_TEST
2400 static uint8_t aes_cbc_iv[] = {
2401 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2402 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
2403 #endif
2405 static int
2406 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2407 struct rte_security_session_conf *conf,
2408 void *sess)
2409 {
2410 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2411 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2412 struct rte_crypto_auth_xform *auth_xform = NULL;
2413 struct rte_crypto_aead_xform *aead_xform = NULL;
2414 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2415 struct ctxt_priv *priv;
2416 struct ipsec_encap_pdb encap_pdb;
2417 struct ipsec_decap_pdb decap_pdb;
2418 struct alginfo authdata, cipherdata;
2419 int bufsize;
2420 struct sec_flow_context *flc;
2421 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2422 int ret = -1;
2424 PMD_INIT_FUNC_TRACE();
2426 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2427 sizeof(struct ctxt_priv) +
2428 sizeof(struct sec_flc_desc),
2429 RTE_CACHE_LINE_SIZE);
2432 DPAA2_SEC_ERR("No memory for priv CTXT");
2436 priv->fle_pool = dev_priv->fle_pool;
2437 flc = &priv->flc_desc[0].flc;
2439 memset(session, 0, sizeof(dpaa2_sec_session));
2441 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2442 cipher_xform = &conf->crypto_xform->cipher;
2443 if (conf->crypto_xform->next)
2444 auth_xform = &conf->crypto_xform->next->auth;
2445 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2446 session, &cipherdata, &authdata);
2447 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2448 auth_xform = &conf->crypto_xform->auth;
2449 if (conf->crypto_xform->next)
2450 cipher_xform = &conf->crypto_xform->next->cipher;
2451 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2452 session, &cipherdata, &authdata);
2453 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2454 aead_xform = &conf->crypto_xform->aead;
2455 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2456 session, &cipherdata);
2458 DPAA2_SEC_ERR("XFORM not specified");
2463 DPAA2_SEC_ERR("Failed to process xform");
2467 session->ctxt_type = DPAA2_SEC_IPSEC;
2468 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2469 uint8_t *hdr = NULL;
2470 struct ip ip4_hdr;
2471 struct rte_ipv6_hdr ip6_hdr;
2473 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2474 /* For Sec Proto only one descriptor is required. */
2475 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2476 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2477 PDBOPTS_ESP_OIHI_PDB_INL |
2478 PDBOPTS_ESP_IVSRC |
2479 PDBHMO_ESP_ENCAP_DTTL |
2480 PDBHMO_ESP_SNR;
2481 if (ipsec_xform->options.esn)
2482 encap_pdb.options |= PDBOPTS_ESP_ESN;
2483 encap_pdb.spi = ipsec_xform->spi;
2484 session->dir = DIR_ENC;
2485 if (ipsec_xform->tunnel.type ==
2486 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2487 encap_pdb.ip_hdr_len = sizeof(struct ip);
2488 ip4_hdr.ip_v = IPVERSION;
2489 ip4_hdr.ip_hl = 5;
2490 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2491 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2492 ip4_hdr.ip_id = 0;
2493 ip4_hdr.ip_off = 0;
2494 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2495 ip4_hdr.ip_p = IPPROTO_ESP;
2496 ip4_hdr.ip_sum = 0;
2497 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2498 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2499 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2500 &ip4_hdr, sizeof(struct ip));
2501 hdr = (uint8_t *)&ip4_hdr;
2502 } else if (ipsec_xform->tunnel.type ==
2503 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2504 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2505 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2506 ((ipsec_xform->tunnel.ipv6.dscp <<
2507 RTE_IPV6_HDR_TC_SHIFT) &
2508 RTE_IPV6_HDR_TC_MASK) |
2509 ((ipsec_xform->tunnel.ipv6.flabel <<
2510 RTE_IPV6_HDR_FL_SHIFT) &
2511 RTE_IPV6_HDR_FL_MASK));
2512 /* Payload length will be updated by HW */
2513 ip6_hdr.payload_len = 0;
2514 ip6_hdr.hop_limits =
2515 ipsec_xform->tunnel.ipv6.hlimit;
2516 ip6_hdr.proto = (ipsec_xform->proto ==
2517 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2518 IPPROTO_ESP : IPPROTO_AH;
2519 memcpy(&ip6_hdr.src_addr,
2520 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2521 memcpy(&ip6_hdr.dst_addr,
2522 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2523 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2524 hdr = (uint8_t *)&ip6_hdr;
2527 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2528 1, 0, SHR_SERIAL, &encap_pdb,
2529 hdr, &cipherdata, &authdata);
2530 } else if (ipsec_xform->direction ==
2531 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2532 flc->dhr = SEC_FLC_DHR_INBOUND;
2533 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2534 decap_pdb.options = sizeof(struct ip) << 16;
2535 if (ipsec_xform->options.esn)
2536 decap_pdb.options |= PDBOPTS_ESP_ESN;
2537 decap_pdb.options = (ipsec_xform->tunnel.type ==
2538 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2539 sizeof(struct ip) << 16 :
2540 sizeof(struct rte_ipv6_hdr) << 16;
2541 session->dir = DIR_DEC;
2542 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2544 &decap_pdb, &cipherdata, &authdata);
2549 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2553 flc->word1_sdl = (uint8_t)bufsize;
2555 /* Enable the stashing control bit */
2556 DPAA2_SET_FLC_RSC(flc);
2557 flc->word2_rflc_31_0 = lower_32_bits(
2558 (size_t)&(((struct dpaa2_sec_qp *)
2559 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2560 flc->word3_rflc_63_32 = upper_32_bits(
2561 (size_t)&(((struct dpaa2_sec_qp *)
2562 dev->data->queue_pairs[0])->rx_vq));
2564 /* Set EWS bit i.e. enable write-safe */
2565 DPAA2_SET_FLC_EWS(flc);
2566 /* Set BS = 1 i.e reuse input buffers as output buffers */
2567 DPAA2_SET_FLC_REUSE_BS(flc);
2568 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2569 DPAA2_SET_FLC_REUSE_FF(flc);
2571 session->ctxt = priv;
2573 return 0;
2574 out:
2575 rte_free(session->auth_key.data);
2576 rte_free(session->cipher_key.data);
2577 rte_free(priv);
2578 return -1;
2579 }
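/*
 * Illustrative only: a minimal rte_security_session_conf an application
 * could pass for the egress tunnel path handled above. The SPI, tunnel
 * addresses and the crypto xform pointer are placeholders.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static void
example_fill_ipsec_conf(struct rte_security_session_conf *conf,
		struct rte_crypto_sym_xform *xform)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.spi = 0x1000;		/* placeholder SPI */
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.tunnel.ipv4.src_ip.s_addr = rte_cpu_to_be_32(0xC0A80101); /* 192.168.1.1 */
	conf->ipsec.tunnel.ipv4.dst_ip.s_addr = rte_cpu_to_be_32(0xC0A80102); /* 192.168.1.2 */
	conf->ipsec.tunnel.ipv4.ttl = 64;
	conf->crypto_xform = xform;
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */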
2581 static int
2582 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2583 struct rte_security_session_conf *conf,
2584 void *sess)
2585 {
2586 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2587 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2588 struct rte_crypto_auth_xform *auth_xform = NULL;
2589 struct rte_crypto_cipher_xform *cipher_xform;
2590 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2591 struct ctxt_priv *priv;
2592 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2593 struct alginfo authdata, cipherdata;
2594 struct alginfo *p_authdata = NULL;
2595 int bufsize = -1;
2596 struct sec_flow_context *flc;
2597 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2598 int swap = true;
2599 #else
2600 int swap = false;
2601 #endif
2603 PMD_INIT_FUNC_TRACE();
2605 memset(session, 0, sizeof(dpaa2_sec_session));
2607 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2608 sizeof(struct ctxt_priv) +
2609 sizeof(struct sec_flc_desc),
2610 RTE_CACHE_LINE_SIZE);
2613 DPAA2_SEC_ERR("No memory for priv CTXT");
2617 priv->fle_pool = dev_priv->fle_pool;
2618 flc = &priv->flc_desc[0].flc;
2620 /* find xfrm types */
2621 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2622 cipher_xform = &xform->cipher;
2623 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2624 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2625 session->ext_params.aead_ctxt.auth_cipher_text = true;
2626 cipher_xform = &xform->cipher;
2627 auth_xform = &xform->next->auth;
2628 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2629 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2630 session->ext_params.aead_ctxt.auth_cipher_text = false;
2631 cipher_xform = &xform->next->cipher;
2632 auth_xform = &xform->auth;
2634 DPAA2_SEC_ERR("Invalid crypto type");
2638 session->ctxt_type = DPAA2_SEC_PDCP;
2639 if (cipher_xform) {
2640 session->cipher_key.data = rte_zmalloc(NULL,
2641 cipher_xform->key.length,
2642 RTE_CACHE_LINE_SIZE);
2643 if (session->cipher_key.data == NULL &&
2644 cipher_xform->key.length > 0) {
2645 DPAA2_SEC_ERR("No Memory for cipher key");
2649 session->cipher_key.length = cipher_xform->key.length;
2650 memcpy(session->cipher_key.data, cipher_xform->key.data,
2651 cipher_xform->key.length);
2652 session->dir =
2653 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2654 DIR_ENC : DIR_DEC;
2655 session->cipher_alg = cipher_xform->algo;
2656 } else {
2657 session->cipher_key.data = NULL;
2658 session->cipher_key.length = 0;
2659 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2660 session->dir = DIR_ENC;
2661 }
2663 session->pdcp.domain = pdcp_xform->domain;
2664 session->pdcp.bearer = pdcp_xform->bearer;
2665 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2666 session->pdcp.sn_size = pdcp_xform->sn_size;
2667 #ifdef ENABLE_HFN_OVERRIDE
2668 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2669 #endif
2670 session->pdcp.hfn = pdcp_xform->hfn;
2671 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2673 cipherdata.key = (size_t)session->cipher_key.data;
2674 cipherdata.keylen = session->cipher_key.length;
2675 cipherdata.key_enc_flags = 0;
2676 cipherdata.key_type = RTA_DATA_IMM;
2678 switch (session->cipher_alg) {
2679 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2680 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2681 break;
2682 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2683 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2684 break;
2685 case RTE_CRYPTO_CIPHER_AES_CTR:
2686 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2687 break;
2688 case RTE_CRYPTO_CIPHER_NULL:
2689 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2690 break;
2691 default:
2692 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2693 session->cipher_alg);
2694 goto out;
2695 }
2697 if (auth_xform) {
2698 session->auth_key.data = rte_zmalloc(NULL,
2699 auth_xform->key.length,
2700 RTE_CACHE_LINE_SIZE);
2701 if (!session->auth_key.data &&
2702 auth_xform->key.length > 0) {
2703 DPAA2_SEC_ERR("No Memory for auth key");
2704 rte_free(session->cipher_key.data);
2705 rte_free(priv);
2706 return -ENOMEM;
2707 }
2708 session->auth_key.length = auth_xform->key.length;
2709 memcpy(session->auth_key.data, auth_xform->key.data,
2710 auth_xform->key.length);
2711 session->auth_alg = auth_xform->algo;
2712 } else {
2713 session->auth_key.data = NULL;
2714 session->auth_key.length = 0;
2715 session->auth_alg = 0;
2716 }
2717 authdata.key = (size_t)session->auth_key.data;
2718 authdata.keylen = session->auth_key.length;
2719 authdata.key_enc_flags = 0;
2720 authdata.key_type = RTA_DATA_IMM;
2722 if (session->auth_alg) {
2723 switch (session->auth_alg) {
2724 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2725 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2726 break;
2727 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2728 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
2729 break;
2730 case RTE_CRYPTO_AUTH_AES_CMAC:
2731 authdata.algtype = PDCP_AUTH_TYPE_AES;
2732 break;
2733 case RTE_CRYPTO_AUTH_NULL:
2734 authdata.algtype = PDCP_AUTH_TYPE_NULL;
2735 break;
2736 default:
2737 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2738 session->auth_alg);
2739 goto out;
2740 }
2742 p_authdata = &authdata;
2743 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2744 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
2748 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2749 if (session->dir == DIR_ENC)
2750 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2751 priv->flc_desc[0].desc, 1, swap,
2752 pdcp_xform->hfn,
2753 session->pdcp.sn_size,
2754 pdcp_xform->bearer,
2755 pdcp_xform->pkt_dir,
2756 pdcp_xform->hfn_threshold,
2757 &cipherdata, &authdata,
2758 0);
2759 else if (session->dir == DIR_DEC)
2760 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2761 priv->flc_desc[0].desc, 1, swap,
2762 pdcp_xform->hfn,
2763 session->pdcp.sn_size,
2764 pdcp_xform->bearer,
2765 pdcp_xform->pkt_dir,
2766 pdcp_xform->hfn_threshold,
2767 &cipherdata, &authdata,
2768 0);
2769 } else {
2770 if (session->dir == DIR_ENC)
2771 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2772 priv->flc_desc[0].desc, 1, swap,
2773 session->pdcp.sn_size,
2774 pdcp_xform->hfn,
2775 pdcp_xform->bearer,
2776 pdcp_xform->pkt_dir,
2777 pdcp_xform->hfn_threshold,
2778 &cipherdata, p_authdata, 0);
2779 else if (session->dir == DIR_DEC)
2780 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2781 priv->flc_desc[0].desc, 1, swap,
2782 session->pdcp.sn_size,
2783 pdcp_xform->hfn,
2784 pdcp_xform->bearer,
2785 pdcp_xform->pkt_dir,
2786 pdcp_xform->hfn_threshold,
2787 &cipherdata, p_authdata, 0);
2788 }
2790 if (bufsize < 0) {
2791 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2792 goto out;
2793 }
2795 /* Enable the stashing control bit */
2796 DPAA2_SET_FLC_RSC(flc);
2797 flc->word2_rflc_31_0 = lower_32_bits(
2798 (size_t)&(((struct dpaa2_sec_qp *)
2799 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2800 flc->word3_rflc_63_32 = upper_32_bits(
2801 (size_t)&(((struct dpaa2_sec_qp *)
2802 dev->data->queue_pairs[0])->rx_vq));
2804 flc->word1_sdl = (uint8_t)bufsize;
2806 /* Set EWS bit i.e. enable write-safe */
2807 DPAA2_SET_FLC_EWS(flc);
2808 /* Set BS = 1 i.e reuse input buffers as output buffers */
2809 DPAA2_SET_FLC_REUSE_BS(flc);
2810 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2811 DPAA2_SET_FLC_REUSE_FF(flc);
2813 session->ctxt = priv;
2815 return 0;
2816 out:
2817 rte_free(session->auth_key.data);
2818 rte_free(session->cipher_key.data);
2819 rte_free(priv);
2820 return -1;
2821 }
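/*
 * Illustrative only: the PDCP fields consumed above, as an application
 * might set them for a u-plane session. All values are placeholders.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static void
example_fill_pdcp_conf(struct rte_security_session_conf *conf,
		struct rte_crypto_sym_xform *xform)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_PDCP;
	conf->pdcp.domain = RTE_SECURITY_PDCP_MODE_DATA;	/* u-plane */
	conf->pdcp.pkt_dir = RTE_SECURITY_PDCP_UPLINK;
	conf->pdcp.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12;
	conf->pdcp.bearer = 0x3;
	conf->pdcp.hfn = 0x1;
	conf->pdcp.hfn_threshold = 0xfffff;
	conf->crypto_xform = xform;
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */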
2823 static int
2824 dpaa2_sec_security_session_create(void *dev,
2825 struct rte_security_session_conf *conf,
2826 struct rte_security_session *sess,
2827 struct rte_mempool *mempool)
2828 {
2829 void *sess_private_data;
2830 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2831 int ret;
2833 if (rte_mempool_get(mempool, &sess_private_data)) {
2834 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2838 switch (conf->protocol) {
2839 case RTE_SECURITY_PROTOCOL_IPSEC:
2840 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2841 sess_private_data);
2842 break;
2843 case RTE_SECURITY_PROTOCOL_MACSEC:
2844 return -ENOTSUP;
2845 case RTE_SECURITY_PROTOCOL_PDCP:
2846 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
2847 sess_private_data);
2848 break;
2849 default:
2850 return -EINVAL;
2851 }
2852 if (ret != 0) {
2853 DPAA2_SEC_ERR("Failed to configure session parameters");
2854 /* Return session to mempool */
2855 rte_mempool_put(mempool, sess_private_data);
2856 return ret;
2857 }
2859 set_sec_session_private_data(sess, sess_private_data);
2861 return ret;
2862 }
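/*
 * Illustrative only: creating a protocol session against this PMD through
 * the generic rte_security API. "dev_id" and "sess_mp" are hypothetical
 * caller-owned handles.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static struct rte_security_session *
example_create_security_session(uint8_t dev_id,
		struct rte_security_session_conf *conf,
		struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx =
		(struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(dev_id);

	if (ctx == NULL)
		return NULL;
	/* Dispatches to dpaa2_sec_security_session_create() above */
	return rte_security_session_create(ctx, conf, sess_mp);
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */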
2864 /** Clear the memory of session so it doesn't leave key material behind */
2865 static int
2866 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2867 struct rte_security_session *sess)
2868 {
2869 PMD_INIT_FUNC_TRACE();
2870 void *sess_priv = get_sec_session_private_data(sess);
2872 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2874 if (sess_priv) {
2875 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2877 rte_free(s->ctxt);
2878 rte_free(s->cipher_key.data);
2879 rte_free(s->auth_key.data);
2880 memset(s, 0, sizeof(dpaa2_sec_session));
2881 set_sec_session_private_data(sess, NULL);
2882 rte_mempool_put(sess_mp, sess_priv);
2883 }
2884 return 0;
2885 }
2887 static int
2888 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2889 struct rte_crypto_sym_xform *xform,
2890 struct rte_cryptodev_sym_session *sess,
2891 struct rte_mempool *mempool)
2892 {
2893 void *sess_private_data;
2894 int ret;
2896 if (rte_mempool_get(mempool, &sess_private_data)) {
2897 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2901 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2903 DPAA2_SEC_ERR("Failed to configure session parameters");
2904 /* Return session to mempool */
2905 rte_mempool_put(mempool, sess_private_data);
2906 return ret;
2907 }
2909 set_sym_session_private_data(sess, dev->driver_id,
2910 sess_private_data);
2912 return 0;
2913 }
2915 /** Clear the memory of session so it doesn't leave key material behind */
2916 static void
2917 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2918 struct rte_cryptodev_sym_session *sess)
2919 {
2920 PMD_INIT_FUNC_TRACE();
2921 uint8_t index = dev->driver_id;
2922 void *sess_priv = get_sym_session_private_data(sess, index);
2923 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2925 if (sess_priv) {
2926 rte_free(s->ctxt);
2927 rte_free(s->cipher_key.data);
2928 rte_free(s->auth_key.data);
2929 memset(s, 0, sizeof(dpaa2_sec_session));
2930 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2931 set_sym_session_private_data(sess, index, NULL);
2932 rte_mempool_put(sess_mp, sess_priv);
2933 }
2934 }
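/*
 * Illustrative only: the symmetric session lifecycle that exercises the
 * configure/clear pair above. Mempool creation and xform setup are assumed
 * to be done by the (hypothetical) caller.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static int
example_sym_session_lifecycle(uint8_t dev_id,
		struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_sym_session *sess;
	int ret;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return -ENOMEM;
	/* Reaches dpaa2_sec_sym_session_configure() through dev ops */
	ret = rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp);
	if (ret == 0)
		ret = rte_cryptodev_sym_session_clear(dev_id, sess);
	rte_cryptodev_sym_session_free(sess);
	return ret;
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */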
2936 static int
2937 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2938 struct rte_cryptodev_config *config __rte_unused)
2939 {
2940 PMD_INIT_FUNC_TRACE();
2942 return 0;
2943 }
2945 static int
2946 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2947 {
2948 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2949 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2950 struct dpseci_attr attr;
2951 struct dpaa2_queue *dpaa2_q;
2952 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2953 dev->data->queue_pairs;
2954 struct dpseci_rx_queue_attr rx_attr;
2955 struct dpseci_tx_queue_attr tx_attr;
2956 int ret, i;
2958 PMD_INIT_FUNC_TRACE();
2960 memset(&attr, 0, sizeof(struct dpseci_attr));
2962 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2964 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2966 goto get_attr_failure;
2968 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2970 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2971 goto get_attr_failure;
2973 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2974 dpaa2_q = &qp[i]->rx_vq;
2975 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2976 &rx_attr);
2977 dpaa2_q->fqid = rx_attr.fqid;
2978 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2980 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2981 dpaa2_q = &qp[i]->tx_vq;
2982 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2983 &tx_attr);
2984 dpaa2_q->fqid = tx_attr.fqid;
2985 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2990 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2995 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2997 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2998 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2999 int ret;
3001 PMD_INIT_FUNC_TRACE();
3003 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3005 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3010 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3012 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3018 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3020 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3021 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3022 int ret;
3024 PMD_INIT_FUNC_TRACE();
3026 /* Function is reverse of dpaa2_sec_dev_init.
3027 * It does the following:
3028 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3029 * 2. Close the DPSECI device
3030 * 3. Free the allocated resources.
3031 */
3033 /*Close the device at underlying layer*/
3034 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3036 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3040 /*Free the allocated memory for ethernet private data and dpseci*/
3048 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3049 struct rte_cryptodev_info *info)
3050 {
3051 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3053 PMD_INIT_FUNC_TRACE();
3054 if (info != NULL) {
3055 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3056 info->feature_flags = dev->feature_flags;
3057 info->capabilities = dpaa2_sec_capabilities;
3058 /* No limit of number of sessions */
3059 info->sym.max_nb_sessions = 0;
3060 info->driver_id = cryptodev_driver_id;
3061 }
3062 }
3064 static
3065 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3066 struct rte_cryptodev_stats *stats)
3067 {
3068 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3069 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3070 struct dpseci_sec_counters counters = {0};
3071 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3072 dev->data->queue_pairs;
3073 int ret, i;
3075 PMD_INIT_FUNC_TRACE();
3076 if (stats == NULL) {
3077 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3080 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3081 if (qp[i] == NULL) {
3082 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3086 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3087 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3088 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3089 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3090 }
3092 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3093 &counters);
3094 if (ret) {
3095 DPAA2_SEC_ERR("SEC counters failed");
3096 } else {
3097 DPAA2_SEC_INFO("dpseci hardware stats:"
3098 "\n\tNum of Requests Dequeued = %" PRIu64
3099 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3100 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3101 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3102 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3103 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3104 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3105 counters.dequeued_requests,
3106 counters.ob_enc_requests,
3107 counters.ib_dec_requests,
3108 counters.ob_enc_bytes,
3109 counters.ob_prot_bytes,
3110 counters.ib_dec_bytes,
3111 counters.ib_valid_bytes);
3112 }
3113 }
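/*
 * Illustrative only: reading the per-queue counters aggregated above
 * through the generic cryptodev stats API; "dev_id" is a placeholder.
 */
#ifdef DPAA2_SEC_USAGE_EXAMPLES
static void
example_dump_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		DPAA2_SEC_INFO("enq %" PRIu64 " deq %" PRIu64,
			       stats.enqueued_count, stats.dequeued_count);
}
#endif /* DPAA2_SEC_USAGE_EXAMPLES */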
3115 static
3116 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3117 {
3118 int i;
3119 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3120 (dev->data->queue_pairs);
3120 (dev->data->queue_pairs);
3122 PMD_INIT_FUNC_TRACE();
3124 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3125 if (qp[i] == NULL) {
3126 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3129 qp[i]->tx_vq.rx_pkts = 0;
3130 qp[i]->tx_vq.tx_pkts = 0;
3131 qp[i]->tx_vq.err_pkts = 0;
3132 qp[i]->rx_vq.rx_pkts = 0;
3133 qp[i]->rx_vq.tx_pkts = 0;
3134 qp[i]->rx_vq.err_pkts = 0;
3135 }
3136 }
3138 static void __attribute__((hot))
3139 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3140 const struct qbman_fd *fd,
3141 const struct qbman_result *dq,
3142 struct dpaa2_queue *rxq,
3143 struct rte_event *ev)
3144 {
3145 /* Prefetching mbuf */
3146 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3147 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3149 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3150 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3152 ev->flow_id = rxq->ev.flow_id;
3153 ev->sub_event_type = rxq->ev.sub_event_type;
3154 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3155 ev->op = RTE_EVENT_OP_NEW;
3156 ev->sched_type = rxq->ev.sched_type;
3157 ev->queue_id = rxq->ev.queue_id;
3158 ev->priority = rxq->ev.priority;
3159 ev->event_ptr = sec_fd_to_mbuf(fd);
3161 qbman_swp_dqrr_consume(swp, dq);
3162 }
3163 static void __attribute__((hot))
3164 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3165 const struct qbman_fd *fd,
3166 const struct qbman_result *dq,
3167 struct dpaa2_queue *rxq,
3168 struct rte_event *ev)
3169 {
3170 uint8_t dqrr_index;
3171 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3172 /* Prefetching mbuf */
3173 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3174 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3176 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3177 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3179 ev->flow_id = rxq->ev.flow_id;
3180 ev->sub_event_type = rxq->ev.sub_event_type;
3181 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3182 ev->op = RTE_EVENT_OP_NEW;
3183 ev->sched_type = rxq->ev.sched_type;
3184 ev->queue_id = rxq->ev.queue_id;
3185 ev->priority = rxq->ev.priority;
3187 ev->event_ptr = sec_fd_to_mbuf(fd);
3188 dqrr_index = qbman_get_dqrr_idx(dq);
3189 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3190 DPAA2_PER_LCORE_DQRR_SIZE++;
3191 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3192 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3193 }
3195 int
3196 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3197 int qp_id,
3198 uint16_t dpcon_id,
3199 const struct rte_event *event)
3200 {
3201 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3202 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3203 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3204 struct dpseci_rx_queue_cfg cfg;
3205 int ret;
3207 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3208 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3209 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3210 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3211 else
3212 return -EINVAL;
3214 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3215 cfg.options = DPSECI_QUEUE_OPT_DEST;
3216 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3217 cfg.dest_cfg.dest_id = dpcon_id;
3218 cfg.dest_cfg.priority = event->priority;
3220 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3221 cfg.user_ctx = (size_t)(qp);
3222 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3223 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3224 cfg.order_preservation_en = 1;
3226 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3227 qp_id, &cfg);
3228 if (ret) {
3229 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3230 return ret;
3231 }
3233 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3235 return 0;
3236 }
3238 int
3239 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3240 int qp_id)
3241 {
3242 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3243 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3244 struct dpseci_rx_queue_cfg cfg;
3245 int ret;
3247 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3248 cfg.options = DPSECI_QUEUE_OPT_DEST;
3249 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3251 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3252 qp_id, &cfg);
3253 if (ret)
3254 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3256 return ret;
3257 }
3259 static struct rte_cryptodev_ops crypto_ops = {
3260 .dev_configure = dpaa2_sec_dev_configure,
3261 .dev_start = dpaa2_sec_dev_start,
3262 .dev_stop = dpaa2_sec_dev_stop,
3263 .dev_close = dpaa2_sec_dev_close,
3264 .dev_infos_get = dpaa2_sec_dev_infos_get,
3265 .stats_get = dpaa2_sec_stats_get,
3266 .stats_reset = dpaa2_sec_stats_reset,
3267 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3268 .queue_pair_release = dpaa2_sec_queue_pair_release,
3269 .queue_pair_count = dpaa2_sec_queue_pair_count,
3270 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3271 .sym_session_configure = dpaa2_sec_sym_session_configure,
3272 .sym_session_clear = dpaa2_sec_sym_session_clear,
3273 };
3275 static const struct rte_security_capability *
3276 dpaa2_sec_capabilities_get(void *device __rte_unused)
3277 {
3278 return dpaa2_sec_security_cap;
3279 }
3281 static const struct rte_security_ops dpaa2_sec_security_ops = {
3282 .session_create = dpaa2_sec_security_session_create,
3283 .session_update = NULL,
3284 .session_stats_get = NULL,
3285 .session_destroy = dpaa2_sec_security_session_destroy,
3286 .set_pkt_metadata = NULL,
3287 .capabilities_get = dpaa2_sec_capabilities_get
3288 };
3290 static int
3291 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3292 {
3293 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3295 rte_free(dev->security_ctx);
3297 rte_mempool_free(internals->fle_pool);
3299 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3300 dev->data->name, rte_socket_id());
3302 return 0;
3303 }
3305 static int
3306 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3307 {
3308 struct dpaa2_sec_dev_private *internals;
3309 struct rte_device *dev = cryptodev->device;
3310 struct rte_dpaa2_device *dpaa2_dev;
3311 struct rte_security_ctx *security_instance;
3312 struct fsl_mc_io *dpseci;
3313 uint16_t token;
3314 struct dpseci_attr attr;
3315 int retcode, hw_id;
3316 char str[30];
3318 PMD_INIT_FUNC_TRACE();
3319 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3320 if (dpaa2_dev == NULL) {
3321 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3324 hw_id = dpaa2_dev->object_id;
3326 cryptodev->driver_id = cryptodev_driver_id;
3327 cryptodev->dev_ops = &crypto_ops;
3329 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3330 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3331 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3332 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3333 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3334 RTE_CRYPTODEV_FF_SECURITY |
3335 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3336 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3337 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3338 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3339 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3341 internals = cryptodev->data->dev_private;
3344 * For secondary processes, we don't initialise any further as primary
3345 * has already done this work. Only check we don't need a different
3348 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3349 DPAA2_SEC_DEBUG("Device already init by primary process");
3353 /* Initialize security_ctx only for primary process*/
3354 security_instance = rte_malloc("rte_security_instances_ops",
3355 sizeof(struct rte_security_ctx), 0);
3356 if (security_instance == NULL)
3357 return -ENOMEM;
3358 security_instance->device = (void *)cryptodev;
3359 security_instance->ops = &dpaa2_sec_security_ops;
3360 security_instance->sess_cnt = 0;
3361 cryptodev->security_ctx = security_instance;
3363 /*Open the rte device via MC and save the handle for further use*/
3364 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3365 sizeof(struct fsl_mc_io), 0);
3368 "Error in allocating the memory for dpsec object");
3371 dpseci->regs = rte_mcp_ptr_list[0];
3373 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3375 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3379 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3382 "Cannot get dpsec device attributed: Error = %x",
3386 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3387 "dpsec-%u", hw_id);
3389 internals->max_nb_queue_pairs = attr.num_tx_queues;
3390 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3391 internals->hw = dpseci;
3392 internals->token = token;
3394 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3395 getpid(), cryptodev->data->dev_id);
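/*
 * The FLE pool name embeds the PID and cryptodev id so that multiple
 * processes and devices get distinct mempools; sizing follows the
 * FLE_POOL_* constants defined near the top of this file.
 */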
3396 internals->fle_pool = rte_mempool_create((const char *)str,
3397 FLE_POOL_NUM_BUFS,
3398 FLE_POOL_BUF_SIZE,
3399 FLE_POOL_CACHE_SIZE, 0,
3400 NULL, NULL, NULL, NULL,
3401 SOCKET_ID_ANY, 0);
3402 if (!internals->fle_pool) {
3403 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3407 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3411 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3413 /* dpaa2_sec_uninit(crypto_dev_name); */
3414 return -1;
3415 }
3417 static int
3418 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3419 struct rte_dpaa2_device *dpaa2_dev)
3420 {
3421 struct rte_cryptodev *cryptodev;
3422 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3424 int retval;
3426 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3427 dpaa2_dev->object_id);
3429 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3430 if (cryptodev == NULL)
3431 return -ENOMEM;
3433 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3434 cryptodev->data->dev_private = rte_zmalloc_socket(
3435 "cryptodev private structure",
3436 sizeof(struct dpaa2_sec_dev_private),
3437 RTE_CACHE_LINE_SIZE,
3438 rte_socket_id());
3440 if (cryptodev->data->dev_private == NULL)
3441 rte_panic("Cannot allocate memzone for private "
3442 "device data");
3443 }
3445 dpaa2_dev->cryptodev = cryptodev;
3446 cryptodev->device = &dpaa2_dev->device;
3448 /* init user callbacks */
3449 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3451 /* Invoke PMD device initialization function */
3452 retval = dpaa2_sec_dev_init(cryptodev);
3453 if (retval == 0)
3454 return 0;
3456 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3457 rte_free(cryptodev->data->dev_private);
3459 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3461 return -ENXIO;
3462 }
3464 static int
3465 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3467 struct rte_cryptodev *cryptodev;
3468 int ret;
3470 cryptodev = dpaa2_dev->cryptodev;
3471 if (cryptodev == NULL)
3472 return -ENODEV;
3474 ret = dpaa2_sec_uninit(cryptodev);
3475 if (ret)
3476 return ret;
3478 return rte_cryptodev_pmd_destroy(cryptodev);
3479 }
3481 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3482 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3483 .drv_type = DPAA2_CRYPTO,
3484 .driver = {
3485 .name = "DPAA2 SEC PMD"
3486 },
3487 .probe = cryptodev_dpaa2_sec_probe,
3488 .remove = cryptodev_dpaa2_sec_remove,
3489 };
3491 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3493 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3494 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3495 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3497 RTE_INIT(dpaa2_sec_init_log)
3498 {
3499 /* Crypto PMD level logs */
3500 dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3501 if (dpaa2_logtype_sec >= 0)
3502 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3503 }