/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>
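
/* Driver-private raw data-path context, stored in
 * rte_crypto_raw_dp_ctx::drv_ctx_data by dpaa2_sec_configure_raw_dp_ctx().
 */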
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};
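
/* Build a compound frame descriptor (FD) for a chained cipher+auth
 * operation: one output and one input frame-list entry (FLE), each
 * pointing to a scatter/gather entry (SGE) table.
 */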
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
	struct rte_crypto_sgl *sgl,
	struct rte_crypto_sgl *dest_sgl,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs,
	void *userdata,
	struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
			ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
			digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va,
			icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
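
/* Build a compound FD for an AEAD (e.g. GCM) operation. */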
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
	struct rte_crypto_sgl *sgl,
	struct rte_crypto_sgl *dest_sgl,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs,
	void *userdata,
	struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
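
/* Build a compound FD for an authentication-only (hash/MAC) operation. */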
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
	struct rte_crypto_sgl *sgl,
	struct rte_crypto_sgl *dest_sgl,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs,
	void *userdata,
	struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, lengths in bits only supported */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
	} else {
		sge->length = sgl->vec[0].len - data_offset;

		/* remaining i/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
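
/* Build a compound FD for a protocol-offload (IPsec/PDCP) operation; the
 * SEC protocol descriptor owns all offsets, so ofs/iv/digest are unused.
 */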
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
	struct rte_crypto_sgl *sgl,
	struct rte_crypto_sgl *dest_sgl,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs,
	void *userdata,
	struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
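
/* Build a compound FD for a cipher-only operation. */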
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
	struct rte_crypto_sgl *sgl,
	struct rte_crypto_sgl *dest_sgl,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *auth_iv,
	union rte_crypto_sym_ofs ofs,
	void *userdata,
	struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, lengths in bits only supported */
	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
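
/* Enqueue a burst of raw symmetric crypto vectors: build one compound FD
 * per vector via the session's build_raw_dp_fd() hook and push them to the
 * TX frame queue through the QBMAN software portal.
 */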
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						&vec->src_sgl[loop],
						&vec->dest_sgl[loop],
						&vec->iv[loop],
						&vec->digest[loop],
						&vec->auth_iv[loop],
						ofs,
						user_data[loop],
						&fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					" for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop],
					&flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}
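
/* Single-operation enqueue is not implemented for this PMD; applications
 * must use the burst API above.
 */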
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}
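
/* Recover the caller's userdata pointer from a returned FD and free the
 * FLE memory that was allocated at enqueue time.
 */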
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}
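
/* Dequeue a burst of completed frames from the RX frame queue via a
 * volatile QBMAN pull command and hand each userdata back through
 * post_dequeue().
 */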
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status, is_success = 0;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
			(nb_ops > dpaa2_dqrr_size) ?
			dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
			1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				fd->simple.frc);
			is_success = false;
		} else {
			is_success = true;
		}
		post_dequeue(user_data, num_rx, is_success);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
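
/* Single-operation dequeue is not implemented for this PMD; applications
 * must use the burst API above.
 */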
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}
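
/* Populate the raw data-path context for a queue pair: hook up the
 * enqueue/dequeue callbacks and select the FD builder matching the
 * session's context type.
 */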
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
	RTE_SET_USED(qp_id);

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;

	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
			sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;

	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}
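
/*
 * Usage (illustrative sketch only, not part of the driver): the generic
 * cryptodev raw data-path API drives the hooks installed above roughly as
 * follows. Names such as dev_id, qp_id, sess, vec, ofs and post_cb are
 * placeholders supplied by the application.
 *
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	size_t sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	// fill a struct rte_crypto_sym_vec 'vec' and offsets 'ofs', then:
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &st);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_ops, post_cb,
 *			out_user_data, 1, &n_success, &st);
 */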