/* SPDX-License-Identifier: BSD-3-Clause */
#include <cryptodev_pmd.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};
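
/*
 * Layout of the per-packet FLE array shared by the build_raw_dp_*_fd()
 * helpers below (as reconstructed from the code itself):
 *
 *	fle[0]          - holds userdata and the session ctxt pointer
 *	fle[1] (op_fle) - output frame list entry of the compound FD
 *	fle[2] (ip_fle) - input frame list entry of the compound FD
 *	fle[3] onwards  - scatter/gather entries, output first, then input
 *
 * The FD is set to COMPOUND format and points at op_fle; the whole array
 * is freed again in sec_fd_to_userdata() when the frame is dequeued.
 */
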
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;
	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;
	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
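
/*
 * Illustrative example (not driver code): for an AES-CBC + HMAC chain
 * where the first 16 bytes of the frame are authenticated only, a caller
 * could set the cipher/auth windows as below; the values are hypothetical.
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *
 *	ofs.ofs.auth.head = 0;		// auth starts at the frame head
 *	ofs.ofs.auth.tail = 0;
 *	ofs.ofs.cipher.head = 16;	// cipher skips a 16-byte header
 *	ofs.ofs.cipher.tail = 0;
 *
 * With these values auth_hdr_len above evaluates to 16 and travels to the
 * hardware via the internal job descriptor bits (auth_only_len).
 */
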
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;
	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + sess->iv.length + auth_only_len) :
			(aead_len + sess->iv.length + auth_only_len +
			icv_len);
	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
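
/*
 * For reference, the AEAD (e.g. GCM) input frame assembled above is, in
 * SGE order: IV | AAD (auth_only_len bytes taken from auth_iv) | payload
 * [| old ICV when decrypting]; the output frame is the payload
 * [+ newly computed ICV when encrypting].
 */
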
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;
	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
	} else {
		sge->length = sgl->vec[0].len - data_offset;

		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
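
/*
 * For reference, the auth-only input frame is [IV |] data [| old digest
 * when verifying], and the single output FLE is the digest buffer itself.
 * For SNOW3G UIA2 / ZUC EIA3 the caller passes data_len/data_offset in
 * bits (hence the >> 3 above) and the IV is first converted to the SEC
 * F9/EIA format.
 */
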
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;
	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	out_len += sge->length;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		out_len += sge->length;
	}
	sge->length = sgl->vec[i - 1].tot_len;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;
	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);
	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
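
/*
 * Illustrative sketch (not driver code): with a PDCP session created with
 * HFN override enabled, the application stores the per-packet HFN at the
 * offset it configured at session-create time, relative to the userdata
 * pointer it enqueues, so the lookup above can find it:
 *
 *	*(uint32_t *)((uint8_t *)user_data + hfn_ovd_offset) = hfn;
 *
 * where user_data, hfn_ovd_offset and hfn are application-chosen values.
 */
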
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;
	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;
	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					" for crypto operation");
				goto skip_tx;
			}
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}
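
/*
 * Minimal enqueue-side usage sketch (illustrative; it uses the public raw
 * datapath API rather than these internal symbols, and assumes ctx, vec,
 * ofs and user_data have been prepared by the application):
 *
 *	int enq_status;
 *	uint32_t n;
 *
 *	n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *					    user_data, &enq_status);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);
 *
 * For this PMD the enqueue itself is immediate, so enqueue_done is a
 * no-op (see dpaa2_sec_raw_enqueue_done() below).
 */
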
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);
	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * a new token by QBMAN.
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}
		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		post_dequeue(user_data, num_rx, status);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
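
/*
 * Matching dequeue-side sketch (illustrative; ctx, post_cb and
 * out_user_data are application-provided, max_nb is an assumed burst
 * size):
 *
 *	uint32_t n_success;
 *	int deq_status;
 *	uint32_t n;
 *
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, NULL, max_nb, post_cb,
 *					    out_user_data, 1, &n_success,
 *					    &deq_status);
 *	rte_cryptodev_raw_dequeue_done(ctx, n);
 *
 * Note this PMD ignores the get_dequeue_count callback and relies on
 * max_nb_to_dequeue, as seen above.
 */
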
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}
static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}
static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}
	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;
	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}
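
/*
 * End-to-end setup sketch (illustrative; dev_id, qp_id and sess_ctx are
 * assumed to be supplied by the application):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx =
 *		(sz > 0) ? rte_zmalloc(NULL, sz, 0) : NULL;
 *
 *	if (ctx != NULL &&
 *	    rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) == 0) {
 *		// ctx is now wired to the dpaa2_sec_raw_* handlers above
 *	}
 */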