1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2019 NXP
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
36 /* RTA header files */
37 #include <desc/ipsec.h>
38 #include <desc/pdcp.h>
39 #include <desc/algo.h>
41 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
42 * a pointer to the shared descriptor
44 #define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
45 #define FSL_VENDOR_ID 0x1957
46 #define FSL_DEVICE_ID 0x410
47 #define FSL_SUBSYSTEM_SEC 1
48 #define FSL_MC_DPSECI_DEVID 3
51 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
52 #define FLE_POOL_NUM_BUFS 32000
53 #define FLE_POOL_BUF_SIZE 256
54 #define FLE_POOL_CACHE_SIZE 512
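/* Scatter/gather FDs are built from an rte_malloc'ed area sized by
 * FLE_SG_MEM_SIZE(): the base FLE pool buffer plus 32 bytes per mbuf
 * segment for the extra SG entries.
 */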
55 #define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32))
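/* Default data head-room adjustment applied to the output mbuf data_off
 * in sec_simple_fd_to_mbuf() after protocol offload processing, per
 * direction (outbound/encap vs inbound/decap).
 */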
56 #define SEC_FLC_DHR_OUTBOUND -114
57 #define SEC_FLC_DHR_INBOUND 0
59 static uint8_t cryptodev_driver_id;
61 #ifdef RTE_LIBRTE_SECURITY
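/* The FD builders below create compound frame descriptors: the FD points
 * to a frame list whose first FLE (op_fle) describes the output buffer
 * and whose second FLE (ip_fle) describes the input buffer. For
 * multi-segment mbufs the FLEs are marked SG-extended and point to a
 * scatter/gather table.
 */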
63 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
64 struct rte_crypto_op *op,
65 struct qbman_fd *fd, uint16_t bpid)
67 struct rte_crypto_sym_op *sym_op = op->sym;
68 struct ctxt_priv *priv = sess->ctxt;
69 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
70 struct sec_flow_context *flc;
71 struct rte_mbuf *mbuf;
72 uint32_t in_len = 0, out_len = 0;
79 /* first FLE entry used to store mbuf and session ctxt */
80 fle = (struct qbman_fle *)rte_malloc(NULL,
81 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
84 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
87 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
88 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
89 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
91 /* Save the shared descriptor */
92 flc = &priv->flc_desc[0].flc;
98 if (likely(bpid < MAX_BPID)) {
99 DPAA2_SET_FD_BPID(fd, bpid);
100 DPAA2_SET_FLE_BPID(op_fle, bpid);
101 DPAA2_SET_FLE_BPID(ip_fle, bpid);
103 DPAA2_SET_FD_IVP(fd);
104 DPAA2_SET_FLE_IVP(op_fle);
105 DPAA2_SET_FLE_IVP(ip_fle);
108 /* Configure FD as a FRAME LIST */
109 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
110 DPAA2_SET_FD_COMPOUND_FMT(fd);
111 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
113 /* Configure Output FLE with Scatter/Gather Entry */
114 DPAA2_SET_FLE_SG_EXT(op_fle);
115 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
117 /* Configure Output SGE for Encap/Decap */
118 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
119 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
122 sge->length = mbuf->data_len;
123 out_len += sge->length;
126 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
127 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
129 /* using buf_len for last buf - so that extra data can be added */
130 sge->length = mbuf->buf_len - mbuf->data_off;
131 out_len += sge->length;
133 DPAA2_SET_FLE_FIN(sge);
134 op_fle->length = out_len;
137 mbuf = sym_op->m_src;
139 /* Configure Input FLE with Scatter/Gather Entry */
140 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
141 DPAA2_SET_FLE_SG_EXT(ip_fle);
142 DPAA2_SET_FLE_FIN(ip_fle);
144 /* Configure input SGE for Encap/Decap */
145 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
146 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
147 sge->length = mbuf->data_len;
148 in_len += sge->length;
154 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
155 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
156 sge->length = mbuf->data_len;
157 in_len += sge->length;
160 ip_fle->length = in_len;
161 DPAA2_SET_FLE_FIN(sge);
163 /* In case of PDCP, the per-packet HFN is stored in the
164 * mbuf priv area after sym_op.
166 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
167 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
168 /* enable HFN override */
169 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
170 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
171 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
173 DPAA2_SET_FD_LEN(fd, ip_fle->length);
179 build_proto_compound_fd(dpaa2_sec_session *sess,
180 struct rte_crypto_op *op,
181 struct qbman_fd *fd, uint16_t bpid)
183 struct rte_crypto_sym_op *sym_op = op->sym;
184 struct ctxt_priv *priv = sess->ctxt;
185 struct qbman_fle *fle, *ip_fle, *op_fle;
186 struct sec_flow_context *flc;
187 struct rte_mbuf *src_mbuf = sym_op->m_src;
188 struct rte_mbuf *dst_mbuf = sym_op->m_dst;
194 /* Save the shared descriptor */
195 flc = &priv->flc_desc[0].flc;
197 /* We are using the first FLE entry to store the mbuf */
198 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
200 DPAA2_SEC_DP_ERR("Memory alloc failed");
203 memset(fle, 0, FLE_POOL_BUF_SIZE);
204 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
205 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
210 if (likely(bpid < MAX_BPID)) {
211 DPAA2_SET_FD_BPID(fd, bpid);
212 DPAA2_SET_FLE_BPID(op_fle, bpid);
213 DPAA2_SET_FLE_BPID(ip_fle, bpid);
215 DPAA2_SET_FD_IVP(fd);
216 DPAA2_SET_FLE_IVP(op_fle);
217 DPAA2_SET_FLE_IVP(ip_fle);
220 /* Configure FD as a FRAME LIST */
221 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
222 DPAA2_SET_FD_COMPOUND_FMT(fd);
223 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
225 /* Configure Output FLE with dst mbuf data */
226 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
227 DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
228 DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
230 /* Configure Input FLE with src mbuf data */
231 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
232 DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
233 DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
235 DPAA2_SET_FD_LEN(fd, ip_fle->length);
236 DPAA2_SET_FLE_FIN(ip_fle);
238 /* In case of PDCP, the per-packet HFN is stored in the
239 * mbuf priv area after sym_op.
241 if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
242 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
243 /* enable HFN override */
244 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
245 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
246 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
254 build_proto_fd(dpaa2_sec_session *sess,
255 struct rte_crypto_op *op,
256 struct qbman_fd *fd, uint16_t bpid)
258 struct rte_crypto_sym_op *sym_op = op->sym;
260 return build_proto_compound_fd(sess, op, fd, bpid);
262 struct ctxt_priv *priv = sess->ctxt;
263 struct sec_flow_context *flc;
264 struct rte_mbuf *mbuf = sym_op->m_src;
266 if (likely(bpid < MAX_BPID))
267 DPAA2_SET_FD_BPID(fd, bpid);
269 DPAA2_SET_FD_IVP(fd);
271 /* Save the shared descriptor */
272 flc = &priv->flc_desc[0].flc;
274 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
275 DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
276 DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
277 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
279 /* save physical address of mbuf */
280 op->sym->aead.digest.phys_addr = mbuf->buf_iova;
281 mbuf->buf_iova = (size_t)op;
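/* The op pointer is now stashed in buf_iova and the original buffer IOVA
 * saved in aead.digest.phys_addr; sec_simple_fd_to_mbuf() restores both
 * when the simple FD comes back from SEC.
 */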
288 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
289 struct rte_crypto_op *op,
290 struct qbman_fd *fd, __rte_unused uint16_t bpid)
292 struct rte_crypto_sym_op *sym_op = op->sym;
293 struct ctxt_priv *priv = sess->ctxt;
294 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
295 struct sec_flow_context *flc;
296 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
297 int icv_len = sess->digest_length;
299 struct rte_mbuf *mbuf;
300 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
304 mbuf = sym_op->m_dst;
306 mbuf = sym_op->m_src;
308 /* first FLE entry used to store mbuf and session ctxt */
309 fle = (struct qbman_fle *)rte_malloc(NULL,
310 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
311 RTE_CACHE_LINE_SIZE);
312 if (unlikely(!fle)) {
313 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
316 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
317 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
318 DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
324 /* Save the shared descriptor */
325 flc = &priv->flc_desc[0].flc;
327 /* Configure FD as a FRAME LIST */
328 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
329 DPAA2_SET_FD_COMPOUND_FMT(fd);
330 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
332 DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
333 "iv-len=%d data_off: 0x%x\n",
334 sym_op->aead.data.offset,
335 sym_op->aead.data.length,
338 sym_op->m_src->data_off);
340 /* Configure Output FLE with Scatter/Gather Entry */
341 DPAA2_SET_FLE_SG_EXT(op_fle);
342 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
345 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
347 op_fle->length = (sess->dir == DIR_ENC) ?
348 (sym_op->aead.data.length + icv_len) :
349 sym_op->aead.data.length;
351 /* Configure Output SGE for Encap/Decap */
352 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
353 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
354 sge->length = mbuf->data_len - sym_op->aead.data.offset;
360 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
361 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
362 sge->length = mbuf->data_len;
365 sge->length -= icv_len;
367 if (sess->dir == DIR_ENC) {
369 DPAA2_SET_FLE_ADDR(sge,
370 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
371 sge->length = icv_len;
373 DPAA2_SET_FLE_FIN(sge);
376 mbuf = sym_op->m_src;
378 /* Configure Input FLE with Scatter/Gather Entry */
379 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
380 DPAA2_SET_FLE_SG_EXT(ip_fle);
381 DPAA2_SET_FLE_FIN(ip_fle);
382 ip_fle->length = (sess->dir == DIR_ENC) ?
383 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
384 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
387 /* Configure Input SGE for Encap/Decap */
388 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
389 sge->length = sess->iv.length;
393 DPAA2_SET_FLE_ADDR(sge,
394 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
395 sge->length = auth_only_len;
399 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
400 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
402 sge->length = mbuf->data_len - sym_op->aead.data.offset;
408 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
409 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
410 sge->length = mbuf->data_len;
414 if (sess->dir == DIR_DEC) {
416 old_icv = (uint8_t *)(sge + 1);
417 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
418 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
419 sge->length = icv_len;
422 DPAA2_SET_FLE_FIN(sge);
424 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
425 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
427 DPAA2_SET_FD_LEN(fd, ip_fle->length);
433 build_authenc_gcm_fd(dpaa2_sec_session *sess,
434 struct rte_crypto_op *op,
435 struct qbman_fd *fd, uint16_t bpid)
437 struct rte_crypto_sym_op *sym_op = op->sym;
438 struct ctxt_priv *priv = sess->ctxt;
439 struct qbman_fle *fle, *sge;
440 struct sec_flow_context *flc;
441 uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
442 int icv_len = sess->digest_length, retval;
444 struct rte_mbuf *dst;
445 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
453 /* TODO we are using the first FLE entry to store the mbuf and session ctxt.
454 * Currently we do not know which FLE has the mbuf stored.
455 * So while retrieving we can go back 1 FLE from the FD ADDR
456 * to get the mbuf address from the previous FLE.
457 * We can have a better approach to use the inline mbuf
459 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
461 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
464 memset(fle, 0, FLE_POOL_BUF_SIZE);
465 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
466 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
469 if (likely(bpid < MAX_BPID)) {
470 DPAA2_SET_FD_BPID(fd, bpid);
471 DPAA2_SET_FLE_BPID(fle, bpid);
472 DPAA2_SET_FLE_BPID(fle + 1, bpid);
473 DPAA2_SET_FLE_BPID(sge, bpid);
474 DPAA2_SET_FLE_BPID(sge + 1, bpid);
475 DPAA2_SET_FLE_BPID(sge + 2, bpid);
476 DPAA2_SET_FLE_BPID(sge + 3, bpid);
478 DPAA2_SET_FD_IVP(fd);
479 DPAA2_SET_FLE_IVP(fle);
480 DPAA2_SET_FLE_IVP((fle + 1));
481 DPAA2_SET_FLE_IVP(sge);
482 DPAA2_SET_FLE_IVP((sge + 1));
483 DPAA2_SET_FLE_IVP((sge + 2));
484 DPAA2_SET_FLE_IVP((sge + 3));
487 /* Save the shared descriptor */
488 flc = &priv->flc_desc[0].flc;
489 /* Configure FD as a FRAME LIST */
490 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
491 DPAA2_SET_FD_COMPOUND_FMT(fd);
492 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
494 DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
495 "iv-len=%d data_off: 0x%x\n",
496 sym_op->aead.data.offset,
497 sym_op->aead.data.length,
500 sym_op->m_src->data_off);
502 /* Configure Output FLE with Scatter/Gather Entry */
503 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
505 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
506 fle->length = (sess->dir == DIR_ENC) ?
507 (sym_op->aead.data.length + icv_len) :
508 sym_op->aead.data.length;
510 DPAA2_SET_FLE_SG_EXT(fle);
512 /* Configure Output SGE for Encap/Decap */
513 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
514 DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
515 sge->length = sym_op->aead.data.length;
517 if (sess->dir == DIR_ENC) {
519 DPAA2_SET_FLE_ADDR(sge,
520 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
521 sge->length = sess->digest_length;
523 DPAA2_SET_FLE_FIN(sge);
528 /* Configure Input FLE with Scatter/Gather Entry */
529 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
530 DPAA2_SET_FLE_SG_EXT(fle);
531 DPAA2_SET_FLE_FIN(fle);
532 fle->length = (sess->dir == DIR_ENC) ?
533 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
534 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
535 sess->digest_length);
537 /* Configure Input SGE for Encap/Decap */
538 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
539 sge->length = sess->iv.length;
542 DPAA2_SET_FLE_ADDR(sge,
543 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
544 sge->length = auth_only_len;
545 DPAA2_SET_FLE_BPID(sge, bpid);
549 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
550 DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
551 sym_op->m_src->data_off);
552 sge->length = sym_op->aead.data.length;
553 if (sess->dir == DIR_DEC) {
555 old_icv = (uint8_t *)(sge + 1);
556 memcpy(old_icv, sym_op->aead.digest.data,
557 sess->digest_length);
558 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
559 sge->length = sess->digest_length;
561 DPAA2_SET_FLE_FIN(sge);
564 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
565 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
568 DPAA2_SET_FD_LEN(fd, fle->length);
573 build_authenc_sg_fd(dpaa2_sec_session *sess,
574 struct rte_crypto_op *op,
575 struct qbman_fd *fd, __rte_unused uint16_t bpid)
577 struct rte_crypto_sym_op *sym_op = op->sym;
578 struct ctxt_priv *priv = sess->ctxt;
579 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
580 struct sec_flow_context *flc;
581 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
582 sym_op->auth.data.offset;
583 uint16_t auth_tail_len = sym_op->auth.data.length -
584 sym_op->cipher.data.length - auth_hdr_len;
585 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
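/* auth_only_len packs the authenticate-only regions: header bytes before
 * the cipher region in the low 16 bits and trailing bytes after it in
 * the high 16 bits; it is handed to SEC through the FLE/FD internal JD
 * field below.
 */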
586 int icv_len = sess->digest_length;
588 struct rte_mbuf *mbuf;
589 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
593 mbuf = sym_op->m_dst;
595 mbuf = sym_op->m_src;
597 /* first FLE entry used to store mbuf and session ctxt */
598 fle = (struct qbman_fle *)rte_malloc(NULL,
599 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
600 RTE_CACHE_LINE_SIZE);
601 if (unlikely(!fle)) {
602 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
605 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
606 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
607 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
613 /* Save the shared descriptor */
614 flc = &priv->flc_desc[0].flc;
616 /* Configure FD as a FRAME LIST */
617 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
618 DPAA2_SET_FD_COMPOUND_FMT(fd);
619 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
622 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
623 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
624 sym_op->auth.data.offset,
625 sym_op->auth.data.length,
627 sym_op->cipher.data.offset,
628 sym_op->cipher.data.length,
630 sym_op->m_src->data_off);
632 /* Configure Output FLE with Scatter/Gather Entry */
633 DPAA2_SET_FLE_SG_EXT(op_fle);
634 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
637 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
639 op_fle->length = (sess->dir == DIR_ENC) ?
640 (sym_op->cipher.data.length + icv_len) :
641 sym_op->cipher.data.length;
643 /* Configure Output SGE for Encap/Decap */
644 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
645 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
646 sge->length = mbuf->data_len - sym_op->auth.data.offset;
652 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
653 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
654 sge->length = mbuf->data_len;
657 sge->length -= icv_len;
659 if (sess->dir == DIR_ENC) {
661 DPAA2_SET_FLE_ADDR(sge,
662 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
663 sge->length = icv_len;
665 DPAA2_SET_FLE_FIN(sge);
668 mbuf = sym_op->m_src;
670 /* Configure Input FLE with Scatter/Gather Entry */
671 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
672 DPAA2_SET_FLE_SG_EXT(ip_fle);
673 DPAA2_SET_FLE_FIN(ip_fle);
674 ip_fle->length = (sess->dir == DIR_ENC) ?
675 (sym_op->auth.data.length + sess->iv.length) :
676 (sym_op->auth.data.length + sess->iv.length +
679 /* Configure Input SGE for Encap/Decap */
680 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
681 sge->length = sess->iv.length;
684 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
685 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
687 sge->length = mbuf->data_len - sym_op->auth.data.offset;
693 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
694 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
695 sge->length = mbuf->data_len;
698 sge->length -= icv_len;
700 if (sess->dir == DIR_DEC) {
702 old_icv = (uint8_t *)(sge + 1);
703 memcpy(old_icv, sym_op->auth.digest.data,
705 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
706 sge->length = icv_len;
709 DPAA2_SET_FLE_FIN(sge);
711 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
712 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
714 DPAA2_SET_FD_LEN(fd, ip_fle->length);
720 build_authenc_fd(dpaa2_sec_session *sess,
721 struct rte_crypto_op *op,
722 struct qbman_fd *fd, uint16_t bpid)
724 struct rte_crypto_sym_op *sym_op = op->sym;
725 struct ctxt_priv *priv = sess->ctxt;
726 struct qbman_fle *fle, *sge;
727 struct sec_flow_context *flc;
728 uint16_t auth_hdr_len = sym_op->cipher.data.offset -
729 sym_op->auth.data.offset;
730 uint16_t auth_tail_len = sym_op->auth.data.length -
731 sym_op->cipher.data.length - auth_hdr_len;
732 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
734 int icv_len = sess->digest_length, retval;
736 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
738 struct rte_mbuf *dst;
745 /* We are using the first FLE entry to store the mbuf.
746 * Currently we do not know which FLE has the mbuf stored.
747 * So while retrieving we can go back 1 FLE from the FD ADDR
748 * to get the mbuf address from the previous FLE.
749 * We can have a better approach to use the inline mbuf
751 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
753 DPAA2_SEC_ERR("Memory alloc failed for SGE");
756 memset(fle, 0, FLE_POOL_BUF_SIZE);
757 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
758 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
761 if (likely(bpid < MAX_BPID)) {
762 DPAA2_SET_FD_BPID(fd, bpid);
763 DPAA2_SET_FLE_BPID(fle, bpid);
764 DPAA2_SET_FLE_BPID(fle + 1, bpid);
765 DPAA2_SET_FLE_BPID(sge, bpid);
766 DPAA2_SET_FLE_BPID(sge + 1, bpid);
767 DPAA2_SET_FLE_BPID(sge + 2, bpid);
768 DPAA2_SET_FLE_BPID(sge + 3, bpid);
770 DPAA2_SET_FD_IVP(fd);
771 DPAA2_SET_FLE_IVP(fle);
772 DPAA2_SET_FLE_IVP((fle + 1));
773 DPAA2_SET_FLE_IVP(sge);
774 DPAA2_SET_FLE_IVP((sge + 1));
775 DPAA2_SET_FLE_IVP((sge + 2));
776 DPAA2_SET_FLE_IVP((sge + 3));
779 /* Save the shared descriptor */
780 flc = &priv->flc_desc[0].flc;
781 /* Configure FD as a FRAME LIST */
782 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
783 DPAA2_SET_FD_COMPOUND_FMT(fd);
784 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
787 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
788 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
789 sym_op->auth.data.offset,
790 sym_op->auth.data.length,
792 sym_op->cipher.data.offset,
793 sym_op->cipher.data.length,
795 sym_op->m_src->data_off);
797 /* Configure Output FLE with Scatter/Gather Entry */
798 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
800 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
801 fle->length = (sess->dir == DIR_ENC) ?
802 (sym_op->cipher.data.length + icv_len) :
803 sym_op->cipher.data.length;
805 DPAA2_SET_FLE_SG_EXT(fle);
807 /* Configure Output SGE for Encap/Decap */
808 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
809 DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
811 sge->length = sym_op->cipher.data.length;
813 if (sess->dir == DIR_ENC) {
815 DPAA2_SET_FLE_ADDR(sge,
816 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
817 sge->length = sess->digest_length;
818 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
821 DPAA2_SET_FLE_FIN(sge);
826 /* Configure Input FLE with Scatter/Gather Entry */
827 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
828 DPAA2_SET_FLE_SG_EXT(fle);
829 DPAA2_SET_FLE_FIN(fle);
830 fle->length = (sess->dir == DIR_ENC) ?
831 (sym_op->auth.data.length + sess->iv.length) :
832 (sym_op->auth.data.length + sess->iv.length +
833 sess->digest_length);
835 /* Configure Input SGE for Encap/Decap */
836 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
837 sge->length = sess->iv.length;
840 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
841 DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
842 sym_op->m_src->data_off);
843 sge->length = sym_op->auth.data.length;
844 if (sess->dir == DIR_DEC) {
846 old_icv = (uint8_t *)(sge + 1);
847 memcpy(old_icv, sym_op->auth.digest.data,
848 sess->digest_length);
849 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
850 sge->length = sess->digest_length;
851 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
852 sess->digest_length +
855 DPAA2_SET_FLE_FIN(sge);
857 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
858 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
863 static inline int build_auth_sg_fd(
864 dpaa2_sec_session *sess,
865 struct rte_crypto_op *op,
867 __rte_unused uint16_t bpid)
869 struct rte_crypto_sym_op *sym_op = op->sym;
870 struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
871 struct sec_flow_context *flc;
872 struct ctxt_priv *priv = sess->ctxt;
873 int data_len, data_offset;
875 struct rte_mbuf *mbuf;
877 data_len = sym_op->auth.data.length;
878 data_offset = sym_op->auth.data.offset;
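/* For SNOW 3G UIA2 and ZUC EIA3 the auth length/offset are given in
 * bits; they must be byte aligned and are converted to bytes here.
 */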
880 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
881 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
882 if ((data_len & 7) || (data_offset & 7)) {
883 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
887 data_len = data_len >> 3;
888 data_offset = data_offset >> 3;
891 mbuf = sym_op->m_src;
892 fle = (struct qbman_fle *)rte_malloc(NULL,
893 FLE_SG_MEM_SIZE(mbuf->nb_segs),
894 RTE_CACHE_LINE_SIZE);
895 if (unlikely(!fle)) {
896 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
899 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
900 /* first FLE entry used to store mbuf and session ctxt */
901 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
902 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
907 flc = &priv->flc_desc[DESC_INITFINAL].flc;
909 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
910 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
911 DPAA2_SET_FD_COMPOUND_FMT(fd);
914 DPAA2_SET_FLE_ADDR(op_fle,
915 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
916 op_fle->length = sess->digest_length;
919 DPAA2_SET_FLE_SG_EXT(ip_fle);
920 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
921 ip_fle->length = data_len;
923 if (sess->iv.length) {
926 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
929 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
930 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
932 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
933 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
936 sge->length = sess->iv.length;
938 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
939 ip_fle->length += sge->length;
943 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
944 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
946 if (data_len <= (mbuf->data_len - data_offset)) {
947 sge->length = data_len;
950 sge->length = mbuf->data_len - data_offset;
952 /* remaining i/p segs */
953 while ((data_len = data_len - sge->length) &&
954 (mbuf = mbuf->next)) {
956 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
957 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
958 if (data_len > mbuf->data_len)
959 sge->length = mbuf->data_len;
961 sge->length = data_len;
965 if (sess->dir == DIR_DEC) {
966 /* Digest verification case */
968 old_digest = (uint8_t *)(sge + 1);
969 rte_memcpy(old_digest, sym_op->auth.digest.data,
970 sess->digest_length);
971 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
972 sge->length = sess->digest_length;
973 ip_fle->length += sess->digest_length;
975 DPAA2_SET_FLE_FIN(sge);
976 DPAA2_SET_FLE_FIN(ip_fle);
977 DPAA2_SET_FD_LEN(fd, ip_fle->length);
983 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
984 struct qbman_fd *fd, uint16_t bpid)
986 struct rte_crypto_sym_op *sym_op = op->sym;
987 struct qbman_fle *fle, *sge;
988 struct sec_flow_context *flc;
989 struct ctxt_priv *priv = sess->ctxt;
990 int data_len, data_offset;
994 data_len = sym_op->auth.data.length;
995 data_offset = sym_op->auth.data.offset;
997 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
998 sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
999 if ((data_len & 7) || (data_offset & 7)) {
1000 DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1004 data_len = data_len >> 3;
1005 data_offset = data_offset >> 3;
1008 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1010 DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1013 memset(fle, 0, FLE_POOL_BUF_SIZE);
1014 /* TODO we are using the first FLE entry to store the mbuf.
1015 * Currently we do not know which FLE has the mbuf stored.
1016 * So while retrieving we can go back 1 FLE from the FD ADDR
1017 * to get the mbuf address from the previous FLE.
1018 * We can have a better approach to use the inline mbuf
1020 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1021 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1025 if (likely(bpid < MAX_BPID)) {
1026 DPAA2_SET_FD_BPID(fd, bpid);
1027 DPAA2_SET_FLE_BPID(fle, bpid);
1028 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1029 DPAA2_SET_FLE_BPID(sge, bpid);
1030 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1032 DPAA2_SET_FD_IVP(fd);
1033 DPAA2_SET_FLE_IVP(fle);
1034 DPAA2_SET_FLE_IVP((fle + 1));
1035 DPAA2_SET_FLE_IVP(sge);
1036 DPAA2_SET_FLE_IVP((sge + 1));
1039 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1040 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1041 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1042 DPAA2_SET_FD_COMPOUND_FMT(fd);
1044 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1045 fle->length = sess->digest_length;
1048 /* Setting input FLE */
1049 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1050 DPAA2_SET_FLE_SG_EXT(fle);
1051 fle->length = data_len;
1053 if (sess->iv.length) {
1056 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1059 if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1060 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1062 } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1063 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1066 sge->length = sess->iv.length;
1069 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1070 fle->length = fle->length + sge->length;
1074 /* Setting data to authenticate */
1075 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1076 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1077 sge->length = data_len;
1079 if (sess->dir == DIR_DEC) {
1081 old_digest = (uint8_t *)(sge + 1);
1082 rte_memcpy(old_digest, sym_op->auth.digest.data,
1083 sess->digest_length);
1084 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1085 sge->length = sess->digest_length;
1086 fle->length = fle->length + sess->digest_length;
1089 DPAA2_SET_FLE_FIN(sge);
1090 DPAA2_SET_FLE_FIN(fle);
1091 DPAA2_SET_FD_LEN(fd, fle->length);
1097 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1098 struct qbman_fd *fd, __rte_unused uint16_t bpid)
1100 struct rte_crypto_sym_op *sym_op = op->sym;
1101 struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1102 int data_len, data_offset;
1103 struct sec_flow_context *flc;
1104 struct ctxt_priv *priv = sess->ctxt;
1105 struct rte_mbuf *mbuf;
1106 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1109 data_len = sym_op->cipher.data.length;
1110 data_offset = sym_op->cipher.data.offset;
1112 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1113 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1114 if ((data_len & 7) || (data_offset & 7)) {
1115 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1119 data_len = data_len >> 3;
1120 data_offset = data_offset >> 3;
1124 mbuf = sym_op->m_dst;
1126 mbuf = sym_op->m_src;
1128 /* first FLE entry used to store mbuf and session ctxt */
1129 fle = (struct qbman_fle *)rte_malloc(NULL,
1130 FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1131 RTE_CACHE_LINE_SIZE);
1133 DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1136 memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1137 /* first FLE entry used to store mbuf and session ctxt */
1138 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1139 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1145 flc = &priv->flc_desc[0].flc;
1148 "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1149 " data_off: 0x%x\n",
1153 sym_op->m_src->data_off);
1156 DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1157 op_fle->length = data_len;
1158 DPAA2_SET_FLE_SG_EXT(op_fle);
1161 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1162 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1163 sge->length = mbuf->data_len - data_offset;
1169 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1170 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1171 sge->length = mbuf->data_len;
1174 DPAA2_SET_FLE_FIN(sge);
1177 "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1178 flc, fle, fle->addr_hi, fle->addr_lo,
1182 mbuf = sym_op->m_src;
1184 DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1185 ip_fle->length = sess->iv.length + data_len;
1186 DPAA2_SET_FLE_SG_EXT(ip_fle);
1189 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1190 DPAA2_SET_FLE_OFFSET(sge, 0);
1191 sge->length = sess->iv.length;
1196 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1197 DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1198 sge->length = mbuf->data_len - data_offset;
1204 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1205 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1206 sge->length = mbuf->data_len;
1209 DPAA2_SET_FLE_FIN(sge);
1210 DPAA2_SET_FLE_FIN(ip_fle);
1213 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1214 DPAA2_SET_FD_LEN(fd, ip_fle->length);
1215 DPAA2_SET_FD_COMPOUND_FMT(fd);
1216 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1219 "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1220 " off =%d, len =%d\n",
1221 DPAA2_GET_FD_ADDR(fd),
1222 DPAA2_GET_FD_BPID(fd),
1223 rte_dpaa2_bpid_info[bpid].meta_data_size,
1224 DPAA2_GET_FD_OFFSET(fd),
1225 DPAA2_GET_FD_LEN(fd));
1230 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1231 struct qbman_fd *fd, uint16_t bpid)
1233 struct rte_crypto_sym_op *sym_op = op->sym;
1234 struct qbman_fle *fle, *sge;
1235 int retval, data_len, data_offset;
1236 struct sec_flow_context *flc;
1237 struct ctxt_priv *priv = sess->ctxt;
1238 uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1240 struct rte_mbuf *dst;
1242 data_len = sym_op->cipher.data.length;
1243 data_offset = sym_op->cipher.data.offset;
1245 if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1246 sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1247 if ((data_len & 7) || (data_offset & 7)) {
1248 DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1252 data_len = data_len >> 3;
1253 data_offset = data_offset >> 3;
1257 dst = sym_op->m_dst;
1259 dst = sym_op->m_src;
1261 retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1263 DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1266 memset(fle, 0, FLE_POOL_BUF_SIZE);
1267 /* TODO we are using the first FLE entry to store the mbuf.
1268 * Currently we do not know which FLE has the mbuf stored.
1269 * So while retrieving we can go back 1 FLE from the FD ADDR
1270 * to get the mbuf address from the previous FLE.
1271 * We can have a better approach to use the inline mbuf
1273 DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1274 DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1278 if (likely(bpid < MAX_BPID)) {
1279 DPAA2_SET_FD_BPID(fd, bpid);
1280 DPAA2_SET_FLE_BPID(fle, bpid);
1281 DPAA2_SET_FLE_BPID(fle + 1, bpid);
1282 DPAA2_SET_FLE_BPID(sge, bpid);
1283 DPAA2_SET_FLE_BPID(sge + 1, bpid);
1285 DPAA2_SET_FD_IVP(fd);
1286 DPAA2_SET_FLE_IVP(fle);
1287 DPAA2_SET_FLE_IVP((fle + 1));
1288 DPAA2_SET_FLE_IVP(sge);
1289 DPAA2_SET_FLE_IVP((sge + 1));
1292 flc = &priv->flc_desc[0].flc;
1293 DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1294 DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1295 DPAA2_SET_FD_COMPOUND_FMT(fd);
1296 DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1299 "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1300 " data_off: 0x%x\n",
1304 sym_op->m_src->data_off);
1306 DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1307 DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1309 fle->length = data_len + sess->iv.length;
1312 "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1313 flc, fle, fle->addr_hi, fle->addr_lo,
1318 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1319 fle->length = data_len + sess->iv.length;
1321 DPAA2_SET_FLE_SG_EXT(fle);
1323 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1324 sge->length = sess->iv.length;
1327 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1328 DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1330 sge->length = data_len;
1331 DPAA2_SET_FLE_FIN(sge);
1332 DPAA2_SET_FLE_FIN(fle);
1335 "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1336 " off =%d, len =%d\n",
1337 DPAA2_GET_FD_ADDR(fd),
1338 DPAA2_GET_FD_BPID(fd),
1339 rte_dpaa2_bpid_info[bpid].meta_data_size,
1340 DPAA2_GET_FD_OFFSET(fd),
1341 DPAA2_GET_FD_LEN(fd));
1347 build_sec_fd(struct rte_crypto_op *op,
1348 struct qbman_fd *fd, uint16_t bpid)
1351 dpaa2_sec_session *sess;
1353 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1354 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1355 op->sym->session, cryptodev_driver_id);
1356 #ifdef RTE_LIBRTE_SECURITY
1357 else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1358 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1359 op->sym->sec_session);
1367 /* Any of the buffers is segmented */
1368 if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1369 ((op->sym->m_dst != NULL) &&
1370 !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
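/* Segmented (multi-seg) mbufs take the scatter/gather FD builders;
 * contiguous mbufs fall through to the flat builders in the else path.
 */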
1371 switch (sess->ctxt_type) {
1372 case DPAA2_SEC_CIPHER:
1373 ret = build_cipher_sg_fd(sess, op, fd, bpid);
1375 case DPAA2_SEC_AUTH:
1376 ret = build_auth_sg_fd(sess, op, fd, bpid);
1378 case DPAA2_SEC_AEAD:
1379 ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1381 case DPAA2_SEC_CIPHER_HASH:
1382 ret = build_authenc_sg_fd(sess, op, fd, bpid);
1384 #ifdef RTE_LIBRTE_SECURITY
1385 case DPAA2_SEC_IPSEC:
1386 case DPAA2_SEC_PDCP:
1387 ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1390 case DPAA2_SEC_HASH_CIPHER:
1392 DPAA2_SEC_ERR("error: Unsupported session");
1395 switch (sess->ctxt_type) {
1396 case DPAA2_SEC_CIPHER:
1397 ret = build_cipher_fd(sess, op, fd, bpid);
1399 case DPAA2_SEC_AUTH:
1400 ret = build_auth_fd(sess, op, fd, bpid);
1402 case DPAA2_SEC_AEAD:
1403 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1405 case DPAA2_SEC_CIPHER_HASH:
1406 ret = build_authenc_fd(sess, op, fd, bpid);
1408 #ifdef RTE_LIBRTE_SECURITY
1409 case DPAA2_SEC_IPSEC:
1410 ret = build_proto_fd(sess, op, fd, bpid);
1412 case DPAA2_SEC_PDCP:
1413 ret = build_proto_compound_fd(sess, op, fd, bpid);
1416 case DPAA2_SEC_HASH_CIPHER:
1418 DPAA2_SEC_ERR("error: Unsupported session");
1426 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1429 /* Function to transmit the frames to a given device and VQ */
1432 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1433 uint32_t frames_to_send, retry_count;
1434 struct qbman_eq_desc eqdesc;
1435 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1436 struct qbman_swp *swp;
1437 uint16_t num_tx = 0;
1438 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1439 /* TODO: need to support multiple buffer pools */
1441 struct rte_mempool *mb_pool;
1443 if (unlikely(nb_ops == 0))
1446 if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1447 DPAA2_SEC_ERR("sessionless crypto op not supported");
1450 /*Prepare enqueue descriptor*/
1451 qbman_eq_desc_clear(&eqdesc);
1452 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1453 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1454 qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1456 if (!DPAA2_PER_LCORE_DPIO) {
1457 ret = dpaa2_affine_qbman_swp();
1460 "Failed to allocate IO portal, tid: %d\n",
1465 swp = DPAA2_PER_LCORE_PORTAL;
1468 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1469 dpaa2_eqcr_size : nb_ops;
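/* Enqueue in bursts of at most the EQCR ring size. */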
1471 for (loop = 0; loop < frames_to_send; loop++) {
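/* A non-zero seqn means the source mbuf still holds a DQRR entry
 * (typically set when the op arrived via an event queue); enqueue with
 * the DCA flag so the hardware consumes that entry, and mark the seqn
 * invalid.
 */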
1472 if ((*ops)->sym->m_src->seqn) {
1473 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1475 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1476 DPAA2_PER_LCORE_DQRR_SIZE--;
1477 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1478 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1481 /*Clear the unused FD fields before sending*/
1482 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1483 mb_pool = (*ops)->sym->m_src->pool;
1484 bpid = mempool_to_bpid(mb_pool);
1485 ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1487 DPAA2_SEC_ERR("error: Improper packet contents"
1488 " for crypto operation");
1496 while (loop < frames_to_send) {
1497 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1500 frames_to_send - loop);
1501 if (unlikely(ret < 0)) {
1503 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1518 dpaa2_qp->tx_vq.tx_pkts += num_tx;
1519 dpaa2_qp->tx_vq.err_pkts += nb_ops;
1523 #ifdef RTE_LIBRTE_SECURITY
1524 static inline struct rte_crypto_op *
1525 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1527 struct rte_crypto_op *op;
1528 uint16_t len = DPAA2_GET_FD_LEN(fd);
1530 dpaa2_sec_session *sess_priv __rte_unused;
1532 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1533 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1534 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1536 diff = len - mbuf->pkt_len;
1537 mbuf->pkt_len += diff;
1538 mbuf->data_len += diff;
1539 op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1540 mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1541 op->sym->aead.digest.phys_addr = 0L;
1543 sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1544 op->sym->sec_session);
1545 if (sess_priv->dir == DIR_ENC)
1546 mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1548 mbuf->data_off += SEC_FLC_DHR_INBOUND;
1554 static inline struct rte_crypto_op *
1555 sec_fd_to_mbuf(const struct qbman_fd *fd)
1557 struct qbman_fle *fle;
1558 struct rte_crypto_op *op;
1559 struct ctxt_priv *priv;
1560 struct rte_mbuf *dst, *src;
1562 #ifdef RTE_LIBRTE_SECURITY
1563 if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1564 return sec_simple_fd_to_mbuf(fd);
1566 fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1568 DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1569 fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1571 /* We are using the first FLE entry to store the mbuf.
1572 * Currently we do not know which FLE has the mbuf stored.
1573 * So while retrieving we can go back 1 FLE from the FD ADDR
1574 * to get the mbuf address from the previous FLE.
1575 * We can have a better approach to use the inline mbuf
1578 if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1579 /* TODO complete it. */
1580 DPAA2_SEC_ERR("error: non inline buffer");
1583 op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1586 src = op->sym->m_src;
1589 if (op->sym->m_dst) {
1590 dst = op->sym->m_dst;
1595 #ifdef RTE_LIBRTE_SECURITY
1596 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1597 uint16_t len = DPAA2_GET_FD_LEN(fd);
1599 while (dst->next != NULL) {
1600 len -= dst->data_len;
1603 dst->data_len = len;
1606 DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1607 " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1610 DPAA2_GET_FD_ADDR(fd),
1611 DPAA2_GET_FD_BPID(fd),
1612 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1613 DPAA2_GET_FD_OFFSET(fd),
1614 DPAA2_GET_FD_LEN(fd));
1616 /* free the fle memory */
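/* Flat FDs take their FLEs from the per-device FLE mempool, while the
 * scatter/gather builders rte_malloc() theirs, hence the two release
 * paths below.
 */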
1617 if (likely(rte_pktmbuf_is_contiguous(src))) {
1618 priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1619 rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1621 rte_free((void *)(fle-1));
1627 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1630 /* Function responsible for receiving frames for a given device and VQ */
1631 struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1632 struct qbman_result *dq_storage;
1633 uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1634 int ret, num_rx = 0;
1635 uint8_t is_last = 0, status;
1636 struct qbman_swp *swp;
1637 const struct qbman_fd *fd;
1638 struct qbman_pull_desc pulldesc;
1640 if (!DPAA2_PER_LCORE_DPIO) {
1641 ret = dpaa2_affine_qbman_swp();
1644 "Failed to allocate IO portal, tid: %d\n",
1649 swp = DPAA2_PER_LCORE_PORTAL;
1650 dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1652 qbman_pull_desc_clear(&pulldesc);
1653 qbman_pull_desc_set_numframes(&pulldesc,
1654 (nb_ops > dpaa2_dqrr_size) ?
1655 dpaa2_dqrr_size : nb_ops);
1656 qbman_pull_desc_set_fq(&pulldesc, fqid);
1657 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1658 (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1661 /*Issue a volatile dequeue command. */
1663 if (qbman_swp_pull(swp, &pulldesc)) {
1665 "SEC VDQ command is not issued : QBMAN busy");
1666 /* Portal was busy, try again */
1672 /* Receive the packets till the Last Dequeue entry is found with
1673 * respect to the PULL command issued above.
1676 /* Check if the previously issued command is completed.
1677 * Note that the SWP may be shared between the Ethernet driver
1678 * and the SEC driver.
1680 while (!qbman_check_command_complete(dq_storage))
1683 /* Loop until the dq_storage is updated with
1684 * new token by QBMAN
1686 while (!qbman_check_new_result(dq_storage))
1688 /* Check whether the last pull command has expired and
1689 * set the condition for loop termination
1691 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1693 /* Check for valid frame. */
1694 status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1696 (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1697 DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1702 fd = qbman_result_DQ_fd(dq_storage);
1703 ops[num_rx] = sec_fd_to_mbuf(fd);
1705 if (unlikely(fd->simple.frc)) {
1706 /* TODO Parse SEC errors */
1707 DPAA2_SEC_ERR("SEC returned Error - %x",
1709 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1711 ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1716 } /* End of Packet Rx loop */
1718 dpaa2_qp->rx_vq.rx_pkts += num_rx;
1720 DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1721 /*Return the total number of packets received to DPAA2 app*/
1725 /** Release queue pair */
1727 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1729 struct dpaa2_sec_qp *qp =
1730 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1732 PMD_INIT_FUNC_TRACE();
1734 if (qp->rx_vq.q_storage) {
1735 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1736 rte_free(qp->rx_vq.q_storage);
1740 dev->data->queue_pairs[queue_pair_id] = NULL;
1745 /** Setup a queue pair */
1747 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1748 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1749 __rte_unused int socket_id)
1751 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1752 struct dpaa2_sec_qp *qp;
1753 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1754 struct dpseci_rx_queue_cfg cfg;
1757 PMD_INIT_FUNC_TRACE();
1759 /* If qp is already in use, free ring memory and qp metadata. */
1760 if (dev->data->queue_pairs[qp_id] != NULL) {
1761 DPAA2_SEC_INFO("QP already setup");
1765 DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1766 dev, qp_id, qp_conf);
1768 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1770 qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1771 RTE_CACHE_LINE_SIZE);
1773 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1777 qp->rx_vq.crypto_data = dev->data;
1778 qp->tx_vq.crypto_data = dev->data;
1779 qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1780 sizeof(struct queue_storage_info_t),
1781 RTE_CACHE_LINE_SIZE);
1782 if (!qp->rx_vq.q_storage) {
1783 DPAA2_SEC_ERR("malloc failed for q_storage");
1786 memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1788 if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1789 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1793 dev->data->queue_pairs[qp_id] = qp;
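/* Publish the rx_vq pointer as the queue user context so that dequeue
 * processing (e.g. in event mode) can map frames back to this queue
 * pair.
 */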
1795 cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1796 cfg.user_ctx = (size_t)(&qp->rx_vq);
1797 retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1802 /** Returns the size of the dpaa2_sec session structure */
1804 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1806 PMD_INIT_FUNC_TRACE();
1808 return sizeof(dpaa2_sec_session);
1812 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1813 struct rte_crypto_sym_xform *xform,
1814 dpaa2_sec_session *session)
1816 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1817 struct alginfo cipherdata;
1818 int bufsize, ret = 0;
1819 struct ctxt_priv *priv;
1820 struct sec_flow_context *flc;
1822 PMD_INIT_FUNC_TRACE();
1824 /* For SEC CIPHER only one descriptor is required. */
1825 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1826 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1827 RTE_CACHE_LINE_SIZE);
1829 DPAA2_SEC_ERR("No Memory for priv CTXT");
1833 priv->fle_pool = dev_priv->fle_pool;
1835 flc = &priv->flc_desc[0].flc;
1837 session->ctxt_type = DPAA2_SEC_CIPHER;
1838 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1839 RTE_CACHE_LINE_SIZE);
1840 if (session->cipher_key.data == NULL) {
1841 DPAA2_SEC_ERR("No Memory for cipher key");
1845 session->cipher_key.length = xform->cipher.key.length;
1847 memcpy(session->cipher_key.data, xform->cipher.key.data,
1848 xform->cipher.key.length);
1849 cipherdata.key = (size_t)session->cipher_key.data;
1850 cipherdata.keylen = session->cipher_key.length;
1851 cipherdata.key_enc_flags = 0;
1852 cipherdata.key_type = RTA_DATA_IMM;
1854 /* Set IV parameters */
1855 session->iv.offset = xform->cipher.iv.offset;
1856 session->iv.length = xform->cipher.iv.length;
1857 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1860 switch (xform->cipher.algo) {
1861 case RTE_CRYPTO_CIPHER_AES_CBC:
1862 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1863 cipherdata.algmode = OP_ALG_AAI_CBC;
1864 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1865 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1866 SHR_NEVER, &cipherdata,
1870 case RTE_CRYPTO_CIPHER_3DES_CBC:
1871 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1872 cipherdata.algmode = OP_ALG_AAI_CBC;
1873 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1874 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1875 SHR_NEVER, &cipherdata,
1879 case RTE_CRYPTO_CIPHER_AES_CTR:
1880 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1881 cipherdata.algmode = OP_ALG_AAI_CTR;
1882 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1883 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1884 SHR_NEVER, &cipherdata,
1888 case RTE_CRYPTO_CIPHER_3DES_CTR:
1889 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1890 cipherdata.algmode = OP_ALG_AAI_CTR;
1891 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
1892 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1893 SHR_NEVER, &cipherdata,
1897 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1898 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1899 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1900 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1904 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1905 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1906 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1907 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1911 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1912 case RTE_CRYPTO_CIPHER_AES_F8:
1913 case RTE_CRYPTO_CIPHER_AES_ECB:
1914 case RTE_CRYPTO_CIPHER_3DES_ECB:
1915 case RTE_CRYPTO_CIPHER_AES_XTS:
1916 case RTE_CRYPTO_CIPHER_ARC4:
1917 case RTE_CRYPTO_CIPHER_NULL:
1918 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1919 xform->cipher.algo);
1923 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1924 xform->cipher.algo);
1930 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1935 flc->word1_sdl = (uint8_t)bufsize;
1936 session->ctxt = priv;
1938 #ifdef CAAM_DESC_DEBUG
1940 for (i = 0; i < bufsize; i++)
1941 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1946 rte_free(session->cipher_key.data);
1952 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1953 struct rte_crypto_sym_xform *xform,
1954 dpaa2_sec_session *session)
1956 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1957 struct alginfo authdata;
1958 int bufsize, ret = 0;
1959 struct ctxt_priv *priv;
1960 struct sec_flow_context *flc;
1962 PMD_INIT_FUNC_TRACE();
1964 /* For SEC AUTH three descriptors are required for various stages */
1965 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1966 sizeof(struct ctxt_priv) + 3 *
1967 sizeof(struct sec_flc_desc),
1968 RTE_CACHE_LINE_SIZE);
1970 DPAA2_SEC_ERR("No Memory for priv CTXT");
1974 priv->fle_pool = dev_priv->fle_pool;
1975 flc = &priv->flc_desc[DESC_INITFINAL].flc;
1977 session->ctxt_type = DPAA2_SEC_AUTH;
1978 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1979 RTE_CACHE_LINE_SIZE);
1980 if (session->auth_key.data == NULL) {
1981 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1985 session->auth_key.length = xform->auth.key.length;
1987 memcpy(session->auth_key.data, xform->auth.key.data,
1988 xform->auth.key.length);
1989 authdata.key = (size_t)session->auth_key.data;
1990 authdata.keylen = session->auth_key.length;
1991 authdata.key_enc_flags = 0;
1992 authdata.key_type = RTA_DATA_IMM;
1994 session->digest_length = xform->auth.digest_length;
1995 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1998 switch (xform->auth.algo) {
1999 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2000 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2001 authdata.algmode = OP_ALG_AAI_HMAC;
2002 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2003 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2004 1, 0, SHR_NEVER, &authdata,
2006 session->digest_length);
2008 case RTE_CRYPTO_AUTH_MD5_HMAC:
2009 authdata.algtype = OP_ALG_ALGSEL_MD5;
2010 authdata.algmode = OP_ALG_AAI_HMAC;
2011 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2012 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2013 1, 0, SHR_NEVER, &authdata,
2015 session->digest_length);
2017 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2018 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2019 authdata.algmode = OP_ALG_AAI_HMAC;
2020 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2021 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2022 1, 0, SHR_NEVER, &authdata,
2024 session->digest_length);
2026 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2027 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2028 authdata.algmode = OP_ALG_AAI_HMAC;
2029 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2030 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2031 1, 0, SHR_NEVER, &authdata,
2033 session->digest_length);
2035 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2036 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2037 authdata.algmode = OP_ALG_AAI_HMAC;
2038 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2039 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2040 1, 0, SHR_NEVER, &authdata,
2042 session->digest_length);
2044 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2045 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2046 authdata.algmode = OP_ALG_AAI_HMAC;
2047 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2048 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2049 1, 0, SHR_NEVER, &authdata,
2051 session->digest_length);
2053 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2054 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2055 authdata.algmode = OP_ALG_AAI_F9;
2056 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2057 session->iv.offset = xform->auth.iv.offset;
2058 session->iv.length = xform->auth.iv.length;
2059 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2062 session->digest_length);
2064 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2065 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2066 authdata.algmode = OP_ALG_AAI_F9;
2067 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2068 session->iv.offset = xform->auth.iv.offset;
2069 session->iv.length = xform->auth.iv.length;
2070 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2073 session->digest_length);
2075 case RTE_CRYPTO_AUTH_KASUMI_F9:
2076 case RTE_CRYPTO_AUTH_NULL:
2077 case RTE_CRYPTO_AUTH_SHA1:
2078 case RTE_CRYPTO_AUTH_SHA256:
2079 case RTE_CRYPTO_AUTH_SHA512:
2080 case RTE_CRYPTO_AUTH_SHA224:
2081 case RTE_CRYPTO_AUTH_SHA384:
2082 case RTE_CRYPTO_AUTH_MD5:
2083 case RTE_CRYPTO_AUTH_AES_GMAC:
2084 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2085 case RTE_CRYPTO_AUTH_AES_CMAC:
2086 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2087 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2092 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2099 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2104 flc->word1_sdl = (uint8_t)bufsize;
2105 session->ctxt = priv;
2106 #ifdef CAAM_DESC_DEBUG
2108 for (i = 0; i < bufsize; i++)
2109 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2110 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2116 rte_free(session->auth_key.data);
2122 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2123 struct rte_crypto_sym_xform *xform,
2124 dpaa2_sec_session *session)
2126 struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2127 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2128 struct alginfo aeaddata;
2130 struct ctxt_priv *priv;
2131 struct sec_flow_context *flc;
2132 struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2135 PMD_INIT_FUNC_TRACE();
2137 /* Set IV parameters */
2138 session->iv.offset = aead_xform->iv.offset;
2139 session->iv.length = aead_xform->iv.length;
2140 session->ctxt_type = DPAA2_SEC_AEAD;
2142 /* For SEC AEAD only one descriptor is required */
2143 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2144 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2145 RTE_CACHE_LINE_SIZE);
2147 DPAA2_SEC_ERR("No Memory for priv CTXT");
2151 priv->fle_pool = dev_priv->fle_pool;
2152 flc = &priv->flc_desc[0].flc;
2154 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2155 RTE_CACHE_LINE_SIZE);
2156 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2157 DPAA2_SEC_ERR("No Memory for aead key");
2161 memcpy(session->aead_key.data, aead_xform->key.data,
2162 aead_xform->key.length);
2164 session->digest_length = aead_xform->digest_length;
2165 session->aead_key.length = aead_xform->key.length;
2166 ctxt->auth_only_len = aead_xform->aad_length;
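/* The AAD length is carried as auth_only_len and later programmed into
 * the FLE/FD internal JD field by the GCM FD builders.
 */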
2168 aeaddata.key = (size_t)session->aead_key.data;
2169 aeaddata.keylen = session->aead_key.length;
2170 aeaddata.key_enc_flags = 0;
2171 aeaddata.key_type = RTA_DATA_IMM;
2173 switch (aead_xform->algo) {
2174 case RTE_CRYPTO_AEAD_AES_GCM:
2175 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2176 aeaddata.algmode = OP_ALG_AAI_GCM;
2177 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2179 case RTE_CRYPTO_AEAD_AES_CCM:
2180 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2185 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2190 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2193 priv->flc_desc[0].desc[0] = aeaddata.keylen;
2194 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2196 (unsigned int *)priv->flc_desc[0].desc,
2197 &priv->flc_desc[0].desc[1], 1);
2200 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2204 if (priv->flc_desc[0].desc[1] & 1) {
2205 aeaddata.key_type = RTA_DATA_IMM;
2207 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2208 aeaddata.key_type = RTA_DATA_PTR;
2210 priv->flc_desc[0].desc[0] = 0;
2211 priv->flc_desc[0].desc[1] = 0;
2213 if (session->dir == DIR_ENC)
2214 bufsize = cnstr_shdsc_gcm_encap(
2215 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2216 &aeaddata, session->iv.length,
2217 session->digest_length);
2219 bufsize = cnstr_shdsc_gcm_decap(
2220 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2221 &aeaddata, session->iv.length,
2222 session->digest_length);
2224 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2229 flc->word1_sdl = (uint8_t)bufsize;
2230 session->ctxt = priv;
2231 #ifdef CAAM_DESC_DEBUG
2233 for (i = 0; i < bufsize; i++)
2234 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2235 i, priv->flc_desc[0].desc[i]);
2240 rte_free(session->aead_key.data);
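/* Set up a chained cipher+auth ("authenc") session. The order of the two
 * xforms selects DPAA2_SEC_CIPHER_HASH vs DPAA2_SEC_HASH_CIPHER, both keys
 * are copied into the session, rta_inline_query() decides per key whether
 * it can live inline in the descriptor, and cnstr_shdsc_authenc() builds
 * the shared descriptor for the cipher-then-hash case; hash-before-cipher
 * is rejected below as unsupported.
 */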
2247 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2248 struct rte_crypto_sym_xform *xform,
2249 dpaa2_sec_session *session)
2251 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2252 struct alginfo authdata, cipherdata;
2254 struct ctxt_priv *priv;
2255 struct sec_flow_context *flc;
2256 struct rte_crypto_cipher_xform *cipher_xform;
2257 struct rte_crypto_auth_xform *auth_xform;
2260 PMD_INIT_FUNC_TRACE();
2262 if (session->ext_params.aead_ctxt.auth_cipher_text) {
2263 cipher_xform = &xform->cipher;
2264 auth_xform = &xform->next->auth;
2265 session->ctxt_type =
2266 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2267 DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2269 cipher_xform = &xform->next->cipher;
2270 auth_xform = &xform->auth;
2271 session->ctxt_type =
2272 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2273 DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2276 /* Set IV parameters */
2277 session->iv.offset = cipher_xform->iv.offset;
2278 session->iv.length = cipher_xform->iv.length;
2280 /* For SEC AEAD only one descriptor is required */
2281 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2282 sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2283 RTE_CACHE_LINE_SIZE);
2285 DPAA2_SEC_ERR("No Memory for priv CTXT");
2289 priv->fle_pool = dev_priv->fle_pool;
2290 flc = &priv->flc_desc[0].flc;
2292 session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2293 RTE_CACHE_LINE_SIZE);
2294 if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2295 DPAA2_SEC_ERR("No Memory for cipher key");
2299 session->cipher_key.length = cipher_xform->key.length;
2300 session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2301 RTE_CACHE_LINE_SIZE);
2302 if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2303 DPAA2_SEC_ERR("No Memory for auth key");
2304 rte_free(session->cipher_key.data);
2308 session->auth_key.length = auth_xform->key.length;
2309 memcpy(session->cipher_key.data, cipher_xform->key.data,
2310 cipher_xform->key.length);
2311 memcpy(session->auth_key.data, auth_xform->key.data,
2312 auth_xform->key.length);
2314 authdata.key = (size_t)session->auth_key.data;
2315 authdata.keylen = session->auth_key.length;
2316 authdata.key_enc_flags = 0;
2317 authdata.key_type = RTA_DATA_IMM;
2319 session->digest_length = auth_xform->digest_length;
2321 switch (auth_xform->algo) {
2322 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2323 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2324 authdata.algmode = OP_ALG_AAI_HMAC;
2325 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2327 case RTE_CRYPTO_AUTH_MD5_HMAC:
2328 authdata.algtype = OP_ALG_ALGSEL_MD5;
2329 authdata.algmode = OP_ALG_AAI_HMAC;
2330 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2332 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2333 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2334 authdata.algmode = OP_ALG_AAI_HMAC;
2335 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2337 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2338 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2339 authdata.algmode = OP_ALG_AAI_HMAC;
2340 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2342 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2343 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2344 authdata.algmode = OP_ALG_AAI_HMAC;
2345 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2347 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2348 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2349 authdata.algmode = OP_ALG_AAI_HMAC;
2350 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2352 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2353 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2354 case RTE_CRYPTO_AUTH_NULL:
2355 case RTE_CRYPTO_AUTH_SHA1:
2356 case RTE_CRYPTO_AUTH_SHA256:
2357 case RTE_CRYPTO_AUTH_SHA512:
2358 case RTE_CRYPTO_AUTH_SHA224:
2359 case RTE_CRYPTO_AUTH_SHA384:
2360 case RTE_CRYPTO_AUTH_MD5:
2361 case RTE_CRYPTO_AUTH_AES_GMAC:
2362 case RTE_CRYPTO_AUTH_KASUMI_F9:
2363 case RTE_CRYPTO_AUTH_AES_CMAC:
2364 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2365 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2366 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2371 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2376 cipherdata.key = (size_t)session->cipher_key.data;
2377 cipherdata.keylen = session->cipher_key.length;
2378 cipherdata.key_enc_flags = 0;
2379 cipherdata.key_type = RTA_DATA_IMM;
2381 switch (cipher_xform->algo) {
2382 case RTE_CRYPTO_CIPHER_AES_CBC:
2383 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2384 cipherdata.algmode = OP_ALG_AAI_CBC;
2385 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2387 case RTE_CRYPTO_CIPHER_3DES_CBC:
2388 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2389 cipherdata.algmode = OP_ALG_AAI_CBC;
2390 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2392 case RTE_CRYPTO_CIPHER_AES_CTR:
2393 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2394 cipherdata.algmode = OP_ALG_AAI_CTR;
2395 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2397 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2398 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2399 case RTE_CRYPTO_CIPHER_NULL:
2400 case RTE_CRYPTO_CIPHER_3DES_ECB:
2401 case RTE_CRYPTO_CIPHER_AES_ECB:
2402 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2403 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2404 cipher_xform->algo);
2408 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2409 cipher_xform->algo);
2413 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2416 priv->flc_desc[0].desc[0] = cipherdata.keylen;
2417 priv->flc_desc[0].desc[1] = authdata.keylen;
2418 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2420 (unsigned int *)priv->flc_desc[0].desc,
2421 &priv->flc_desc[0].desc[2], 2);
2424 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2428 if (priv->flc_desc[0].desc[2] & 1) {
2429 cipherdata.key_type = RTA_DATA_IMM;
2431 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2432 cipherdata.key_type = RTA_DATA_PTR;
2434 if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2435 authdata.key_type = RTA_DATA_IMM;
2437 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2438 authdata.key_type = RTA_DATA_PTR;
2440 priv->flc_desc[0].desc[0] = 0;
2441 priv->flc_desc[0].desc[1] = 0;
2442 priv->flc_desc[0].desc[2] = 0;
2444 if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2445 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2447 &cipherdata, &authdata,
2449 session->digest_length,
2452 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2457 DPAA2_SEC_ERR("Hash before cipher not supported");
2462 flc->word1_sdl = (uint8_t)bufsize;
2463 session->ctxt = priv;
2464 #ifdef CAAM_DESC_DEBUG
2466 for (i = 0; i < bufsize; i++)
2467 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2468 i, priv->flc_desc[0].desc[i]);
2474 rte_free(session->cipher_key.data);
2475 rte_free(session->auth_key.data);
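/* Top-level dispatcher for symmetric crypto sessions. Depending on the
 * xform chain it calls: dpaa2_sec_cipher_init() for cipher-only,
 * dpaa2_sec_auth_init() for auth-only, dpaa2_sec_aead_chain_init() for a
 * cipher+auth chain (falling back to the single-algorithm paths when one
 * side is NULL), and dpaa2_sec_aead_init() for a standalone AEAD xform.
 */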
2481 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2482 struct rte_crypto_sym_xform *xform, void *sess)
2484 dpaa2_sec_session *session = sess;
2487 PMD_INIT_FUNC_TRACE();
2489 if (unlikely(sess == NULL)) {
2490 DPAA2_SEC_ERR("Invalid session struct");
2494 memset(session, 0, sizeof(dpaa2_sec_session));
2495 /* Default IV length = 0 */
2496 session->iv.length = 0;
2499 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2500 ret = dpaa2_sec_cipher_init(dev, xform, session);
2502 /* Authentication Only */
2503 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2504 xform->next == NULL) {
2505 ret = dpaa2_sec_auth_init(dev, xform, session);
2507 /* Cipher then Authenticate */
2508 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2509 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2510 session->ext_params.aead_ctxt.auth_cipher_text = true;
2511 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2512 ret = dpaa2_sec_auth_init(dev, xform, session);
2513 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2514 ret = dpaa2_sec_cipher_init(dev, xform, session);
2516 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2517 /* Authenticate then Cipher */
2518 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2519 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2520 session->ext_params.aead_ctxt.auth_cipher_text = false;
2521 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2522 ret = dpaa2_sec_cipher_init(dev, xform, session);
2523 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2524 ret = dpaa2_sec_auth_init(dev, xform, session);
2526 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2527 /* AEAD operation for AES-GCM kind of Algorithms */
2528 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2529 xform->next == NULL) {
2530 ret = dpaa2_sec_aead_init(dev, xform, session);
2533 DPAA2_SEC_ERR("Invalid crypto type");
2540 #ifdef RTE_LIBRTE_SECURITY
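/* Translate an AEAD xform into IPsec lookaside-protocol descriptor
 * parameters. The requested digest length (8, 12 or 16 bytes) selects
 * OP_PCL_IPSEC_AES_GCM8/12/16 or the corresponding CCM identifiers, and
 * the AEAD key is copied into the session for later descriptor building.
 */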
2542 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2543 dpaa2_sec_session *session,
2544 struct alginfo *aeaddata)
2546 PMD_INIT_FUNC_TRACE();
2548 session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2549 RTE_CACHE_LINE_SIZE);
2550 if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2551 DPAA2_SEC_ERR("No Memory for aead key");
2554 memcpy(session->aead_key.data, aead_xform->key.data,
2555 aead_xform->key.length);
2557 session->digest_length = aead_xform->digest_length;
2558 session->aead_key.length = aead_xform->key.length;
2560 aeaddata->key = (size_t)session->aead_key.data;
2561 aeaddata->keylen = session->aead_key.length;
2562 aeaddata->key_enc_flags = 0;
2563 aeaddata->key_type = RTA_DATA_IMM;
2565 switch (aead_xform->algo) {
2566 case RTE_CRYPTO_AEAD_AES_GCM:
2567 switch (session->digest_length) {
2569 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2572 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2575 aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2578 DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2579 session->digest_length);
2582 aeaddata->algmode = OP_ALG_AAI_GCM;
2583 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2585 case RTE_CRYPTO_AEAD_AES_CCM:
2586 switch (session->digest_length) {
2588 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2591 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2594 aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2597 DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2598 session->digest_length);
2601 aeaddata->algmode = OP_ALG_AAI_CCM;
2602 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2605 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2609 session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
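/* Translate separate cipher and auth xforms into IPsec lookaside-protocol
 * descriptor parameters. Either xform may be absent, in which case the
 * corresponding NULL protocol algorithm (OP_PCL_IPSEC_NULL or
 * OP_PCL_IPSEC_HMAC_NULL) is used; keys are copied into the session.
 */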
2616 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2617 struct rte_crypto_auth_xform *auth_xform,
2618 dpaa2_sec_session *session,
2619 struct alginfo *cipherdata,
2620 struct alginfo *authdata)
2623 session->cipher_key.data = rte_zmalloc(NULL,
2624 cipher_xform->key.length,
2625 RTE_CACHE_LINE_SIZE);
2626 if (session->cipher_key.data == NULL &&
2627 cipher_xform->key.length > 0) {
2628 DPAA2_SEC_ERR("No Memory for cipher key");
2632 session->cipher_key.length = cipher_xform->key.length;
2633 memcpy(session->cipher_key.data, cipher_xform->key.data,
2634 cipher_xform->key.length);
2635 session->cipher_alg = cipher_xform->algo;
2637 session->cipher_key.data = NULL;
2638 session->cipher_key.length = 0;
2639 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2643 session->auth_key.data = rte_zmalloc(NULL,
2644 auth_xform->key.length,
2645 RTE_CACHE_LINE_SIZE);
2646 if (session->auth_key.data == NULL &&
2647 auth_xform->key.length > 0) {
2648 DPAA2_SEC_ERR("No Memory for auth key");
2651 session->auth_key.length = auth_xform->key.length;
2652 memcpy(session->auth_key.data, auth_xform->key.data,
2653 auth_xform->key.length);
2654 session->auth_alg = auth_xform->algo;
2655 session->digest_length = auth_xform->digest_length;
2657 session->auth_key.data = NULL;
2658 session->auth_key.length = 0;
2659 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2662 authdata->key = (size_t)session->auth_key.data;
2663 authdata->keylen = session->auth_key.length;
2664 authdata->key_enc_flags = 0;
2665 authdata->key_type = RTA_DATA_IMM;
2666 switch (session->auth_alg) {
2667 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2668 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2669 authdata->algmode = OP_ALG_AAI_HMAC;
2671 case RTE_CRYPTO_AUTH_MD5_HMAC:
2672 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2673 authdata->algmode = OP_ALG_AAI_HMAC;
2675 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2676 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2677 authdata->algmode = OP_ALG_AAI_HMAC;
2678 if (session->digest_length != 16)
2680 "+++Using sha256-hmac truncated len is non-standard,"
2681 "it will not work with lookaside proto");
2683 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2684 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2685 authdata->algmode = OP_ALG_AAI_HMAC;
2687 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2688 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2689 authdata->algmode = OP_ALG_AAI_HMAC;
2691 case RTE_CRYPTO_AUTH_AES_CMAC:
2692 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2694 case RTE_CRYPTO_AUTH_NULL:
2695 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2697 case RTE_CRYPTO_AUTH_SHA224_HMAC:
2698 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2699 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2700 case RTE_CRYPTO_AUTH_SHA1:
2701 case RTE_CRYPTO_AUTH_SHA256:
2702 case RTE_CRYPTO_AUTH_SHA512:
2703 case RTE_CRYPTO_AUTH_SHA224:
2704 case RTE_CRYPTO_AUTH_SHA384:
2705 case RTE_CRYPTO_AUTH_MD5:
2706 case RTE_CRYPTO_AUTH_AES_GMAC:
2707 case RTE_CRYPTO_AUTH_KASUMI_F9:
2708 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2709 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2710 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2714 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2718 cipherdata->key = (size_t)session->cipher_key.data;
2719 cipherdata->keylen = session->cipher_key.length;
2720 cipherdata->key_enc_flags = 0;
2721 cipherdata->key_type = RTA_DATA_IMM;
2723 switch (session->cipher_alg) {
2724 case RTE_CRYPTO_CIPHER_AES_CBC:
2725 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2726 cipherdata->algmode = OP_ALG_AAI_CBC;
2728 case RTE_CRYPTO_CIPHER_3DES_CBC:
2729 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2730 cipherdata->algmode = OP_ALG_AAI_CBC;
2732 case RTE_CRYPTO_CIPHER_AES_CTR:
2733 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2734 cipherdata->algmode = OP_ALG_AAI_CTR;
2736 case RTE_CRYPTO_CIPHER_NULL:
2737 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2739 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2740 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2741 case RTE_CRYPTO_CIPHER_3DES_ECB:
2742 case RTE_CRYPTO_CIPHER_AES_ECB:
2743 case RTE_CRYPTO_CIPHER_KASUMI_F8:
2744 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2745 session->cipher_alg);
2748 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2749 session->cipher_alg);
2756 #ifdef RTE_LIBRTE_SECURITY_TEST
2757 static uint8_t aes_cbc_iv[] = {
2758 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2759 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
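/* Build a lookaside IPsec (protocol offload) session. For egress a full
 * encapsulation PDB plus a prebuilt tunnel IP header are passed to
 * cnstr_shdsc_ipsec_new_encap(); for ingress a decapsulation PDB with the
 * requested anti-replay window goes to cnstr_shdsc_ipsec_new_decap(). The
 * flow context is then set up for stashing and in-place buffer reuse on
 * queue pair 0's rx_vq.
 *
 * Illustrative session configuration (a sketch using the generic
 * rte_security API fields, not code taken from this driver; the SPI,
 * salt and crypto xform are placeholders):
 *
 *   struct rte_security_session_conf conf = {
 *       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *       .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *       .ipsec = {
 *           .spi = 0x1234,
 *           .salt = 0,
 *           .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *           .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *           .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *       },
 *       .crypto_xform = &aead_xform,
 *   };
 */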
2763 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2764 struct rte_security_session_conf *conf,
2767 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2768 struct rte_crypto_cipher_xform *cipher_xform = NULL;
2769 struct rte_crypto_auth_xform *auth_xform = NULL;
2770 struct rte_crypto_aead_xform *aead_xform = NULL;
2771 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2772 struct ctxt_priv *priv;
2773 struct alginfo authdata, cipherdata;
2775 struct sec_flow_context *flc;
2776 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2779 PMD_INIT_FUNC_TRACE();
2781 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2782 sizeof(struct ctxt_priv) +
2783 sizeof(struct sec_flc_desc),
2784 RTE_CACHE_LINE_SIZE);
2787 DPAA2_SEC_ERR("No memory for priv CTXT");
2791 priv->fle_pool = dev_priv->fle_pool;
2792 flc = &priv->flc_desc[0].flc;
2794 memset(session, 0, sizeof(dpaa2_sec_session));
2796 if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2797 cipher_xform = &conf->crypto_xform->cipher;
2798 if (conf->crypto_xform->next)
2799 auth_xform = &conf->crypto_xform->next->auth;
2800 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2801 session, &cipherdata, &authdata);
2802 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2803 auth_xform = &conf->crypto_xform->auth;
2804 if (conf->crypto_xform->next)
2805 cipher_xform = &conf->crypto_xform->next->cipher;
2806 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2807 session, &cipherdata, &authdata);
2808 } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2809 aead_xform = &conf->crypto_xform->aead;
2810 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2811 session, &cipherdata);
2812 authdata.keylen = 0;
2813 authdata.algtype = 0;
2815 DPAA2_SEC_ERR("XFORM not specified");
2820 DPAA2_SEC_ERR("Failed to process xform");
2824 session->ctxt_type = DPAA2_SEC_IPSEC;
2825 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2826 uint8_t *hdr = NULL;
2828 struct rte_ipv6_hdr ip6_hdr;
2829 struct ipsec_encap_pdb encap_pdb;
2831 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2832 /* For Sec Proto only one descriptor is required. */
2833 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2835 /* copy algo specific data to PDB */
2836 switch (cipherdata.algtype) {
2837 case OP_PCL_IPSEC_AES_CTR:
2838 encap_pdb.ctr.ctr_initial = 0x00000001;
2839 encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2841 case OP_PCL_IPSEC_AES_GCM8:
2842 case OP_PCL_IPSEC_AES_GCM12:
2843 case OP_PCL_IPSEC_AES_GCM16:
2844 memcpy(encap_pdb.gcm.salt,
2845 (uint8_t *)&(ipsec_xform->salt), 4);
2849 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2850 PDBOPTS_ESP_OIHI_PDB_INL |
2852 PDBHMO_ESP_ENCAP_DTTL |
2854 if (ipsec_xform->options.esn)
2855 encap_pdb.options |= PDBOPTS_ESP_ESN;
2856 encap_pdb.spi = ipsec_xform->spi;
2857 session->dir = DIR_ENC;
2858 if (ipsec_xform->tunnel.type ==
2859 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2860 encap_pdb.ip_hdr_len = sizeof(struct ip);
2861 ip4_hdr.ip_v = IPVERSION;
2863 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2864 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2867 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2868 ip4_hdr.ip_p = IPPROTO_ESP;
2870 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2871 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2872 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2873 &ip4_hdr, sizeof(struct ip));
2874 hdr = (uint8_t *)&ip4_hdr;
2875 } else if (ipsec_xform->tunnel.type ==
2876 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2877 ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2878 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2879 ((ipsec_xform->tunnel.ipv6.dscp <<
2880 RTE_IPV6_HDR_TC_SHIFT) &
2881 RTE_IPV6_HDR_TC_MASK) |
2882 ((ipsec_xform->tunnel.ipv6.flabel <<
2883 RTE_IPV6_HDR_FL_SHIFT) &
2884 RTE_IPV6_HDR_FL_MASK));
2885 /* Payload length will be updated by HW */
2886 ip6_hdr.payload_len = 0;
2887 ip6_hdr.hop_limits =
2888 ipsec_xform->tunnel.ipv6.hlimit;
2889 ip6_hdr.proto = (ipsec_xform->proto ==
2890 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2891 IPPROTO_ESP : IPPROTO_AH;
2892 memcpy(&ip6_hdr.src_addr,
2893 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2894 memcpy(&ip6_hdr.dst_addr,
2895 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2896 encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2897 hdr = (uint8_t *)&ip6_hdr;
2900 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2901 1, 0, SHR_SERIAL, &encap_pdb,
2902 hdr, &cipherdata, &authdata);
2903 } else if (ipsec_xform->direction ==
2904 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2905 struct ipsec_decap_pdb decap_pdb;
2907 flc->dhr = SEC_FLC_DHR_INBOUND;
2908 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2909 /* copy algo specific data to PDB */
2910 switch (cipherdata.algtype) {
2911 case OP_PCL_IPSEC_AES_CTR:
2912 decap_pdb.ctr.ctr_initial = 0x00000001;
2913 decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2915 case OP_PCL_IPSEC_AES_GCM8:
2916 case OP_PCL_IPSEC_AES_GCM12:
2917 case OP_PCL_IPSEC_AES_GCM16:
2918 memcpy(decap_pdb.gcm.salt,
2919 (uint8_t *)&(ipsec_xform->salt), 4);
2923 decap_pdb.options = (ipsec_xform->tunnel.type ==
2924 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2925 sizeof(struct ip) << 16 :
2926 sizeof(struct rte_ipv6_hdr) << 16;
2927 if (ipsec_xform->options.esn)
2928 decap_pdb.options |= PDBOPTS_ESP_ESN;
2930 if (ipsec_xform->replay_win_sz) {
2932 win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2941 decap_pdb.options |= PDBOPTS_ESP_ARS32;
2944 decap_pdb.options |= PDBOPTS_ESP_ARS64;
2947 decap_pdb.options |= PDBOPTS_ESP_ARS128;
2950 session->dir = DIR_DEC;
2951 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2953 &decap_pdb, &cipherdata, &authdata);
2958 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2962 flc->word1_sdl = (uint8_t)bufsize;
2964 /* Enable the stashing control bit */
2965 DPAA2_SET_FLC_RSC(flc);
2966 flc->word2_rflc_31_0 = lower_32_bits(
2967 (size_t)&(((struct dpaa2_sec_qp *)
2968 dev->data->queue_pairs[0])->rx_vq) | 0x14);
2969 flc->word3_rflc_63_32 = upper_32_bits(
2970 (size_t)&(((struct dpaa2_sec_qp *)
2971 dev->data->queue_pairs[0])->rx_vq));
2973 /* Set EWS bit i.e. enable write-safe */
2974 DPAA2_SET_FLC_EWS(flc);
2975 /* Set BS = 1 i.e. reuse input buffers as output buffers */
2976 DPAA2_SET_FLC_REUSE_BS(flc);
2977 /* Set FF = 10; reuse input buffers if they provide sufficient space */
2978 DPAA2_SET_FLC_REUSE_FF(flc);
2980 session->ctxt = priv;
2984 rte_free(session->auth_key.data);
2985 rte_free(session->cipher_key.data);
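/* Build a PDCP protocol offload session. The cipher (SNOW3G, ZUC, AES-CTR
 * or NULL) and optional integrity algorithm (SNOW3G, ZUC, AES-CMAC or
 * NULL) are mapped to PDCP_CIPHER_TYPE_ and PDCP_AUTH_TYPE_ values;
 * control-plane sessions must carry integrity. The shared descriptor is
 * then built with the cnstr_shdsc_pdcp_c_plane or _u_plane helpers
 * depending on pdcp_xform->domain and the session direction.
 */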
2991 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2992 struct rte_security_session_conf *conf,
2995 struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2996 struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2997 struct rte_crypto_auth_xform *auth_xform = NULL;
2998 struct rte_crypto_cipher_xform *cipher_xform;
2999 dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3000 struct ctxt_priv *priv;
3001 struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3002 struct alginfo authdata, cipherdata;
3003 struct alginfo *p_authdata = NULL;
3005 struct sec_flow_context *flc;
3006 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3012 PMD_INIT_FUNC_TRACE();
3014 memset(session, 0, sizeof(dpaa2_sec_session));
3016 priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3017 sizeof(struct ctxt_priv) +
3018 sizeof(struct sec_flc_desc),
3019 RTE_CACHE_LINE_SIZE);
3022 DPAA2_SEC_ERR("No memory for priv CTXT");
3026 priv->fle_pool = dev_priv->fle_pool;
3027 flc = &priv->flc_desc[0].flc;
3029 /* find xfrm types */
3030 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
3031 cipher_xform = &xform->cipher;
3032 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
3033 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3034 session->ext_params.aead_ctxt.auth_cipher_text = true;
3035 cipher_xform = &xform->cipher;
3036 auth_xform = &xform->next->auth;
3037 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
3038 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3039 session->ext_params.aead_ctxt.auth_cipher_text = false;
3040 cipher_xform = &xform->next->cipher;
3041 auth_xform = &xform->auth;
3043 DPAA2_SEC_ERR("Invalid crypto type");
3047 session->ctxt_type = DPAA2_SEC_PDCP;
3049 session->cipher_key.data = rte_zmalloc(NULL,
3050 cipher_xform->key.length,
3051 RTE_CACHE_LINE_SIZE);
3052 if (session->cipher_key.data == NULL &&
3053 cipher_xform->key.length > 0) {
3054 DPAA2_SEC_ERR("No Memory for cipher key");
3058 session->cipher_key.length = cipher_xform->key.length;
3059 memcpy(session->cipher_key.data, cipher_xform->key.data,
3060 cipher_xform->key.length);
3062 (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3064 session->cipher_alg = cipher_xform->algo;
3066 session->cipher_key.data = NULL;
3067 session->cipher_key.length = 0;
3068 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3069 session->dir = DIR_ENC;
3072 session->pdcp.domain = pdcp_xform->domain;
3073 session->pdcp.bearer = pdcp_xform->bearer;
3074 session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3075 session->pdcp.sn_size = pdcp_xform->sn_size;
3076 session->pdcp.hfn = pdcp_xform->hfn;
3077 session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3078 session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3079 /* HFN override offset location is stored in the iv.offset value */
3080 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3082 cipherdata.key = (size_t)session->cipher_key.data;
3083 cipherdata.keylen = session->cipher_key.length;
3084 cipherdata.key_enc_flags = 0;
3085 cipherdata.key_type = RTA_DATA_IMM;
3087 switch (session->cipher_alg) {
3088 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3089 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3091 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3092 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3094 case RTE_CRYPTO_CIPHER_AES_CTR:
3095 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3097 case RTE_CRYPTO_CIPHER_NULL:
3098 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3101 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3102 session->cipher_alg);
3107 session->auth_key.data = rte_zmalloc(NULL,
3108 auth_xform->key.length,
3109 RTE_CACHE_LINE_SIZE);
3110 if (!session->auth_key.data &&
3111 auth_xform->key.length > 0) {
3112 DPAA2_SEC_ERR("No Memory for auth key");
3113 rte_free(session->cipher_key.data);
3117 session->auth_key.length = auth_xform->key.length;
3118 memcpy(session->auth_key.data, auth_xform->key.data,
3119 auth_xform->key.length);
3120 session->auth_alg = auth_xform->algo;
3122 session->auth_key.data = NULL;
3123 session->auth_key.length = 0;
3124 session->auth_alg = 0;
3126 authdata.key = (size_t)session->auth_key.data;
3127 authdata.keylen = session->auth_key.length;
3128 authdata.key_enc_flags = 0;
3129 authdata.key_type = RTA_DATA_IMM;
3131 if (session->auth_alg) {
3132 switch (session->auth_alg) {
3133 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3134 authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3136 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3137 authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3139 case RTE_CRYPTO_AUTH_AES_CMAC:
3140 authdata.algtype = PDCP_AUTH_TYPE_AES;
3142 case RTE_CRYPTO_AUTH_NULL:
3143 authdata.algtype = PDCP_AUTH_TYPE_NULL;
3146 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3151 p_authdata = &authdata;
3152 } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3153 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3157 if (rta_inline_pdcp_query(authdata.algtype,
3159 session->pdcp.sn_size,
3160 session->pdcp.hfn_ovd)) {
3161 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3162 cipherdata.key_type = RTA_DATA_PTR;
3165 if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3166 if (session->dir == DIR_ENC)
3167 bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3168 priv->flc_desc[0].desc, 1, swap,
3170 session->pdcp.sn_size,
3172 pdcp_xform->pkt_dir,
3173 pdcp_xform->hfn_threshold,
3174 &cipherdata, &authdata,
3176 else if (session->dir == DIR_DEC)
3177 bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3178 priv->flc_desc[0].desc, 1, swap,
3180 session->pdcp.sn_size,
3182 pdcp_xform->pkt_dir,
3183 pdcp_xform->hfn_threshold,
3184 &cipherdata, &authdata,
3187 if (session->dir == DIR_ENC)
3188 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3189 priv->flc_desc[0].desc, 1, swap,
3190 session->pdcp.sn_size,
3193 pdcp_xform->pkt_dir,
3194 pdcp_xform->hfn_threshold,
3195 &cipherdata, p_authdata, 0);
3196 else if (session->dir == DIR_DEC)
3197 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3198 priv->flc_desc[0].desc, 1, swap,
3199 session->pdcp.sn_size,
3202 pdcp_xform->pkt_dir,
3203 pdcp_xform->hfn_threshold,
3204 &cipherdata, p_authdata, 0);
3208 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3212 /* Enable the stashing control bit */
3213 DPAA2_SET_FLC_RSC(flc);
3214 flc->word2_rflc_31_0 = lower_32_bits(
3215 (size_t)&(((struct dpaa2_sec_qp *)
3216 dev->data->queue_pairs[0])->rx_vq) | 0x14);
3217 flc->word3_rflc_63_32 = upper_32_bits(
3218 (size_t)&(((struct dpaa2_sec_qp *)
3219 dev->data->queue_pairs[0])->rx_vq));
3221 flc->word1_sdl = (uint8_t)bufsize;
3223 /* TODO - check the perf impact or
3224 * align as per descriptor type
3225 * Set EWS bit i.e. enable write-safe
3226 * DPAA2_SET_FLC_EWS(flc);
3229 /* Set BS = 1 i.e. reuse input buffers as output buffers */
3230 DPAA2_SET_FLC_REUSE_BS(flc);
3231 /* Set FF = 10; reuse input buffers if they provide sufficient space */
3232 DPAA2_SET_FLC_REUSE_FF(flc);
3234 session->ctxt = priv;
3238 rte_free(session->auth_key.data);
3239 rte_free(session->cipher_key.data);
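/* rte_security .session_create handler: take a private session object from
 * the mempool, dispatch to the IPsec or PDCP setup above (MACsec is not
 * supported) and attach the result with set_sec_session_private_data();
 * on failure the object is returned to the mempool.
 */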
3245 dpaa2_sec_security_session_create(void *dev,
3246 struct rte_security_session_conf *conf,
3247 struct rte_security_session *sess,
3248 struct rte_mempool *mempool)
3250 void *sess_private_data;
3251 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3254 if (rte_mempool_get(mempool, &sess_private_data)) {
3255 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3259 switch (conf->protocol) {
3260 case RTE_SECURITY_PROTOCOL_IPSEC:
3261 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3264 case RTE_SECURITY_PROTOCOL_MACSEC:
3266 case RTE_SECURITY_PROTOCOL_PDCP:
3267 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3274 DPAA2_SEC_ERR("Failed to configure session parameters");
3275 /* Return session to mempool */
3276 rte_mempool_put(mempool, sess_private_data);
3280 set_sec_session_private_data(sess, sess_private_data);
3285 /** Clear the memory of session so it doesn't leave key material behind */
3287 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3288 struct rte_security_session *sess)
3290 PMD_INIT_FUNC_TRACE();
3291 void *sess_priv = get_sec_session_private_data(sess);
3293 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3296 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3299 rte_free(s->cipher_key.data);
3300 rte_free(s->auth_key.data);
3301 memset(s, 0, sizeof(dpaa2_sec_session));
3302 set_sec_session_private_data(sess, NULL);
3303 rte_mempool_put(sess_mp, sess_priv);
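/* Cryptodev symmetric session configure: allocate the private session data
 * from the given mempool, fill it via dpaa2_sec_set_session_parameters()
 * and attach it to the generic session under this driver's id.
 */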
3309 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3310 struct rte_crypto_sym_xform *xform,
3311 struct rte_cryptodev_sym_session *sess,
3312 struct rte_mempool *mempool)
3314 void *sess_private_data;
3317 if (rte_mempool_get(mempool, &sess_private_data)) {
3318 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3322 ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3324 DPAA2_SEC_ERR("Failed to configure session parameters");
3325 /* Return session to mempool */
3326 rte_mempool_put(mempool, sess_private_data);
3330 set_sym_session_private_data(sess, dev->driver_id,
3336 /** Clear the memory of session so it doesn't leave key material behind */
3338 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3339 struct rte_cryptodev_sym_session *sess)
3341 PMD_INIT_FUNC_TRACE();
3342 uint8_t index = dev->driver_id;
3343 void *sess_priv = get_sym_session_private_data(sess, index);
3344 dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3348 rte_free(s->cipher_key.data);
3349 rte_free(s->auth_key.data);
3350 memset(s, 0, sizeof(dpaa2_sec_session));
3351 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3352 set_sym_session_private_data(sess, index, NULL);
3353 rte_mempool_put(sess_mp, sess_priv);
3358 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3359 struct rte_cryptodev_config *config __rte_unused)
3361 PMD_INIT_FUNC_TRACE();
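/* Device start: enable the DPSECI object through the MC portal and cache
 * the hardware frame queue IDs (FQIDs) of every configured queue pair's
 * Rx and Tx queues for use on the enqueue/dequeue fast path.
 */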
3367 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3369 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3370 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3371 struct dpseci_attr attr;
3372 struct dpaa2_queue *dpaa2_q;
3373 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3374 dev->data->queue_pairs;
3375 struct dpseci_rx_queue_attr rx_attr;
3376 struct dpseci_tx_queue_attr tx_attr;
3379 PMD_INIT_FUNC_TRACE();
3381 memset(&attr, 0, sizeof(struct dpseci_attr));
3383 ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3385 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3387 goto get_attr_failure;
3389 ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3391 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3392 goto get_attr_failure;
3394 for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3395 dpaa2_q = &qp[i]->rx_vq;
3396 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3398 dpaa2_q->fqid = rx_attr.fqid;
3399 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3401 for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3402 dpaa2_q = &qp[i]->tx_vq;
3403 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3405 dpaa2_q->fqid = tx_attr.fqid;
3406 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3411 dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3416 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3418 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3419 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3422 PMD_INIT_FUNC_TRACE();
3424 ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3426 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3431 ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3433 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3439 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3441 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3442 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3445 PMD_INIT_FUNC_TRACE();
3447 /* Function is reverse of dpaa2_sec_dev_init.
3448 * It does the following:
3449 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3450 * 2. Close the DPSECI device
3451 * 3. Free the allocated resources.
3454 /*Close the device at underlying layer*/
3455 ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3457 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3461 /* Free the allocated memory for crypto device private data and dpseci */
3469 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3470 struct rte_cryptodev_info *info)
3472 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3474 PMD_INIT_FUNC_TRACE();
3476 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3477 info->feature_flags = dev->feature_flags;
3478 info->capabilities = dpaa2_sec_capabilities;
3479 /* No limit of number of sessions */
3480 info->sym.max_nb_sessions = 0;
3481 info->driver_id = cryptodev_driver_id;
3486 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3487 struct rte_cryptodev_stats *stats)
3489 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3490 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3491 struct dpseci_sec_counters counters = {0};
3492 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3493 dev->data->queue_pairs;
3496 PMD_INIT_FUNC_TRACE();
3497 if (stats == NULL) {
3498 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3501 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3502 if (qp[i] == NULL) {
3503 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3507 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3508 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3509 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3510 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3513 ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3516 DPAA2_SEC_ERR("SEC counters failed");
3518 DPAA2_SEC_INFO("dpseci hardware stats:"
3519 "\n\tNum of Requests Dequeued = %" PRIu64
3520 "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3521 "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3522 "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3523 "\n\tNum of Outbound Bytes Protected = %" PRIu64
3524 "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3525 "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3526 counters.dequeued_requests,
3527 counters.ob_enc_requests,
3528 counters.ib_dec_requests,
3529 counters.ob_enc_bytes,
3530 counters.ob_prot_bytes,
3531 counters.ib_dec_bytes,
3532 counters.ib_valid_bytes);
3537 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3540 struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3541 (dev->data->queue_pairs);
3543 PMD_INIT_FUNC_TRACE();
3545 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3546 if (qp[i] == NULL) {
3547 DPAA2_SEC_DEBUG("Uninitialised queue pair");
3550 qp[i]->tx_vq.rx_pkts = 0;
3551 qp[i]->tx_vq.tx_pkts = 0;
3552 qp[i]->tx_vq.err_pkts = 0;
3553 qp[i]->rx_vq.rx_pkts = 0;
3554 qp[i]->rx_vq.tx_pkts = 0;
3555 qp[i]->rx_vq.err_pkts = 0;
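/* Event-mode dequeue callbacks. Both variants rebuild the crypto op from
 * the returned frame descriptor (sec_fd_to_mbuf()) and fill the rte_event
 * from the template stored on the Rx queue; the atomic variant also records
 * the DQRR index so the hardware entry is held until the op is released.
 */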
3559 static void __rte_hot
3560 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3561 const struct qbman_fd *fd,
3562 const struct qbman_result *dq,
3563 struct dpaa2_queue *rxq,
3564 struct rte_event *ev)
3566 /* Prefetching mbuf */
3567 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3568 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3570 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3571 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3573 ev->flow_id = rxq->ev.flow_id;
3574 ev->sub_event_type = rxq->ev.sub_event_type;
3575 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3576 ev->op = RTE_EVENT_OP_NEW;
3577 ev->sched_type = rxq->ev.sched_type;
3578 ev->queue_id = rxq->ev.queue_id;
3579 ev->priority = rxq->ev.priority;
3580 ev->event_ptr = sec_fd_to_mbuf(fd);
3582 qbman_swp_dqrr_consume(swp, dq);
3585 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3586 const struct qbman_fd *fd,
3587 const struct qbman_result *dq,
3588 struct dpaa2_queue *rxq,
3589 struct rte_event *ev)
3592 struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3593 /* Prefetching mbuf */
3594 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3595 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3597 /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3598 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3600 ev->flow_id = rxq->ev.flow_id;
3601 ev->sub_event_type = rxq->ev.sub_event_type;
3602 ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3603 ev->op = RTE_EVENT_OP_NEW;
3604 ev->sched_type = rxq->ev.sched_type;
3605 ev->queue_id = rxq->ev.queue_id;
3606 ev->priority = rxq->ev.priority;
3608 ev->event_ptr = sec_fd_to_mbuf(fd);
3609 dqrr_index = qbman_get_dqrr_idx(dq);
3610 crypto_op->sym->m_src->seqn = dqrr_index + 1;
3611 DPAA2_PER_LCORE_DQRR_SIZE++;
3612 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3613 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
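/* Attach or detach a DPSECI Rx queue to an event device. Attach points the
 * queue at the given DPCON, scales the event priority to the DPCON's
 * priority range, selects the parallel or atomic callback above and enables
 * order preservation for atomic scheduling; detach re-targets the queue to
 * DPSECI_DEST_NONE.
 */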
3617 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3619 struct dpaa2_dpcon_dev *dpcon,
3620 const struct rte_event *event)
3622 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3623 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3624 struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3625 struct dpseci_rx_queue_cfg cfg;
3629 if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3630 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3631 else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3632 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3636 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
3637 (dpcon->num_priorities - 1);
3639 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3640 cfg.options = DPSECI_QUEUE_OPT_DEST;
3641 cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3642 cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3643 cfg.dest_cfg.priority = priority;
3645 cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3646 cfg.user_ctx = (size_t)(qp);
3647 if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3648 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3649 cfg.order_preservation_en = 1;
3651 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3654 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3658 memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3664 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3667 struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3668 struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3669 struct dpseci_rx_queue_cfg cfg;
3672 memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3673 cfg.options = DPSECI_QUEUE_OPT_DEST;
3674 cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3676 ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3679 RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3684 static struct rte_cryptodev_ops crypto_ops = {
3685 .dev_configure = dpaa2_sec_dev_configure,
3686 .dev_start = dpaa2_sec_dev_start,
3687 .dev_stop = dpaa2_sec_dev_stop,
3688 .dev_close = dpaa2_sec_dev_close,
3689 .dev_infos_get = dpaa2_sec_dev_infos_get,
3690 .stats_get = dpaa2_sec_stats_get,
3691 .stats_reset = dpaa2_sec_stats_reset,
3692 .queue_pair_setup = dpaa2_sec_queue_pair_setup,
3693 .queue_pair_release = dpaa2_sec_queue_pair_release,
3694 .sym_session_get_size = dpaa2_sec_sym_session_get_size,
3695 .sym_session_configure = dpaa2_sec_sym_session_configure,
3696 .sym_session_clear = dpaa2_sec_sym_session_clear,
3699 #ifdef RTE_LIBRTE_SECURITY
3700 static const struct rte_security_capability *
3701 dpaa2_sec_capabilities_get(void *device __rte_unused)
3703 return dpaa2_sec_security_cap;
3706 static const struct rte_security_ops dpaa2_sec_security_ops = {
3707 .session_create = dpaa2_sec_security_session_create,
3708 .session_update = NULL,
3709 .session_stats_get = NULL,
3710 .session_destroy = dpaa2_sec_security_session_destroy,
3711 .set_pkt_metadata = NULL,
3712 .capabilities_get = dpaa2_sec_capabilities_get
3717 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3719 struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3721 rte_free(dev->security_ctx);
3723 rte_mempool_free(internals->fle_pool);
3725 DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3726 dev->data->name, rte_socket_id());
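/* Probe-time device initialization: set the ops and feature flags, and in
 * the primary process open the DPSECI object through the MC portal, read
 * its attributes to size the queue pairs, create the per-device FLE pool
 * and register the rte_security context.
 */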
3732 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3734 struct dpaa2_sec_dev_private *internals;
3735 struct rte_device *dev = cryptodev->device;
3736 struct rte_dpaa2_device *dpaa2_dev;
3737 #ifdef RTE_LIBRTE_SECURITY
3738 struct rte_security_ctx *security_instance;
3740 struct fsl_mc_io *dpseci;
3742 struct dpseci_attr attr;
3746 PMD_INIT_FUNC_TRACE();
3747 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3748 hw_id = dpaa2_dev->object_id;
3750 cryptodev->driver_id = cryptodev_driver_id;
3751 cryptodev->dev_ops = &crypto_ops;
3753 cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3754 cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3755 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3756 RTE_CRYPTODEV_FF_HW_ACCELERATED |
3757 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3758 RTE_CRYPTODEV_FF_SECURITY |
3759 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3760 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3761 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3762 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3763 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3765 internals = cryptodev->data->dev_private;
3768 * For secondary processes, we don't initialise any further as primary
3769 * has already done this work. Only check we don't need a different
3772 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3773 DPAA2_SEC_DEBUG("Device already init by primary process");
3776 #ifdef RTE_LIBRTE_SECURITY
3777 /* Initialize security_ctx only for primary process*/
3778 security_instance = rte_malloc("rte_security_instances_ops",
3779 sizeof(struct rte_security_ctx), 0);
3780 if (security_instance == NULL)
3782 security_instance->device = (void *)cryptodev;
3783 security_instance->ops = &dpaa2_sec_security_ops;
3784 security_instance->sess_cnt = 0;
3785 cryptodev->security_ctx = security_instance;
3787 /*Open the rte device via MC and save the handle for further use*/
3788 dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3789 sizeof(struct fsl_mc_io), 0);
3792 "Error in allocating the memory for dpsec object");
3795 dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3797 retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3799 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3803 retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3806 "Cannot get dpsec device attributed: Error = %x",
3810 snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3813 internals->max_nb_queue_pairs = attr.num_tx_queues;
3814 cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3815 internals->hw = dpseci;
3816 internals->token = token;
3818 snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3819 getpid(), cryptodev->data->dev_id);
3820 internals->fle_pool = rte_mempool_create((const char *)str,
3823 FLE_POOL_CACHE_SIZE, 0,
3824 NULL, NULL, NULL, NULL,
3826 if (!internals->fle_pool) {
3827 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3831 DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3835 DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3837 /* dpaa2_sec_uninit(crypto_dev_name); */
3842 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3843 struct rte_dpaa2_device *dpaa2_dev)
3845 struct rte_cryptodev *cryptodev;
3846 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3850 snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3851 dpaa2_dev->object_id);
3853 cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3854 if (cryptodev == NULL)
3857 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3858 cryptodev->data->dev_private = rte_zmalloc_socket(
3859 "cryptodev private structure",
3860 sizeof(struct dpaa2_sec_dev_private),
3861 RTE_CACHE_LINE_SIZE,
3864 if (cryptodev->data->dev_private == NULL)
3865 rte_panic("Cannot allocate memzone for private "
3869 dpaa2_dev->cryptodev = cryptodev;
3870 cryptodev->device = &dpaa2_dev->device;
3872 /* init user callbacks */
3873 TAILQ_INIT(&(cryptodev->link_intr_cbs));
3875 if (dpaa2_svr_family == SVR_LX2160A)
3876 rta_set_sec_era(RTA_SEC_ERA_10);
3878 rta_set_sec_era(RTA_SEC_ERA_8);
3880 DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
3882 /* Invoke PMD device initialization function */
3883 retval = dpaa2_sec_dev_init(cryptodev);
3887 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3888 rte_free(cryptodev->data->dev_private);
3890 cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3896 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3898 struct rte_cryptodev *cryptodev;
3901 cryptodev = dpaa2_dev->cryptodev;
3902 if (cryptodev == NULL)
3905 ret = dpaa2_sec_uninit(cryptodev);
3909 return rte_cryptodev_pmd_destroy(cryptodev);
3912 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
3913 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
3914 .drv_type = DPAA2_CRYPTO,
3916 .name = "DPAA2 SEC PMD"
3918 .probe = cryptodev_dpaa2_sec_probe,
3919 .remove = cryptodev_dpaa2_sec_remove,
3922 static struct cryptodev_driver dpaa2_sec_crypto_drv;
3924 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
3925 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
3926 rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3927 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);