/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */
#include <cryptodev_pmd.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

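/*
 * Build a compound frame descriptor (FD) for a chained cipher+auth
 * operation. fle[0] stashes the userdata pointer and the session context
 * (both recovered at dequeue time), fle[1] is the output FLE, fle[2] the
 * input FLE, and the scatter/gather entries follow from fle[3] onward.
 */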
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP: a separate destination SGL was supplied */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

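/*
 * Build a compound FD for an AEAD (e.g. GCM) operation. The layout matches
 * the chain case; the AAD, when present, is linked in as an extra input SGE
 * and its length is passed to SEC via the internal job descriptor field.
 */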
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP: a separate destination SGL was supplied */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	if (auth_only_len) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
	}

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

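/*
 * Build a compound FD for an auth-only operation: the digest is the single
 * output FLE, and the input FLE gathers the (optional) IV, the data to
 * authenticate and, for verification, a copy of the received digest.
 */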
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G/ZUC, length and offset are given in bits */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
	} else {
		sge->length = sgl->vec[0].len - data_offset;

		/* remaining i/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

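/*
 * Build a compound FD for protocol offload (IPsec/PDCP): whole buffers
 * are handed to SEC without cipher/auth offsets, and the FD and FLEs
 * carry the per-packet HFN override for PDCP when the session enables it.
 */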
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP: a separate destination SGL was supplied */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

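/*
 * Build a compound FD for a cipher-only operation. For SNOW3G/ZUC the
 * API carries lengths and offsets in bits, so they are converted to
 * bytes here and rejected when not byte-aligned.
 */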
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G/ZUC, length and offset are given in bits */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP: a separate destination SGL was supplied */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

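/*
 * Enqueue a burst of raw operations: one FD is built per element of the
 * sym_vec through the session's build_raw_dp_fd callback, and the FDs are
 * pushed to the TX frame queue via the per-lcore QBMAN software portal,
 * with bounded retries on EQCR back-pressure.
 */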
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

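/* Single-operation enqueue is not implemented; only the burst path is
 * supported by this PMD, so this stub just ignores its arguments.
 */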
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

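/* Recover the userdata pointer stashed in fle[0] by the FD builders and
 * release the FLE memory allocated at enqueue time.
 */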
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

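/*
 * Dequeue a burst of completed operations: issue a volatile dequeue
 * (pull) command on the RX frame queue, then walk the DQRR storage until
 * the pull completes, reporting per-frame status via post_dequeue().
 */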
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* Function receives frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command not issued: QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is complete.
		 * The SWP may also be shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until dq_storage is updated
		 * with a new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		post_dequeue(user_data, num_rx, status);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to the application */
	return num_rx;
}

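/* Single-operation dequeue is not implemented; only the burst path is
 * supported by this PMD, so this stub just ignores its arguments.
 */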
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

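/*
 * Bind a queue pair and a session to a raw data-path context and select
 * the FD builder matching the session type. A minimal usage sketch from
 * the application side (illustrative only, not taken from this file;
 * assumes dev_id, qp_id, a configured queue pair and a session sess):
 *
 *	size_t len = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, len, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &st);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, nb, post_cb,
 *			out_user_data, 1, &n_ok, &st);
 */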
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
	RTE_SET_USED(qp_id);

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

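/* Size of the driver-private area the application must reserve after
 * struct rte_crypto_raw_dp_ctx when allocating the raw DP context.
 */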
int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}