1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021-2022 NXP
5 #include <rte_byteorder.h>
6 #include <rte_common.h>
7 #include <cryptodev_pmd.h>
8 #include <rte_crypto.h>
9 #include <rte_cryptodev.h>
10 #ifdef RTE_LIB_SECURITY
11 #include <rte_security_driver.h>
14 /* RTA header files */
15 #include <desc/algo.h>
16 #include <desc/ipsec.h>
18 #include <rte_dpaa_bus.h>
20 #include <dpaa_sec_log.h>
22 struct dpaa_sec_raw_dp_ctx {
23 dpaa_sec_session *session;
26 uint16_t cached_enqueue;
27 uint16_t cached_dequeue;
31 is_encode(dpaa_sec_session *ses)
33 return ses->dir == DIR_ENC;
36 static inline int is_decode(dpaa_sec_session *ses)
38 return ses->dir == DIR_DEC;
41 static __rte_always_inline int
42 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
44 RTE_SET_USED(qp_data);
45 RTE_SET_USED(drv_ctx);
51 static __rte_always_inline int
52 dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
54 RTE_SET_USED(qp_data);
55 RTE_SET_USED(drv_ctx);
61 static inline struct dpaa_sec_op_ctx *
62 dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
64 struct dpaa_sec_op_ctx *ctx;
67 retval = rte_mempool_get(
68 ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
71 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
76 * one call to dcbz_64() clears 64 bytes, so it is called 4 times
77 * to clear all the SG entries. Since dpaa_sec_alloc_raw_ctx() is called
78 * for each packet, memset() would be costlier than dcbz_64().
80 for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
81 dcbz_64(&ctx->job.sg[i]);
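/* Worked example (illustrative only): one dcbz_64() call zeroes a
 * 64-byte cache line, i.e. four 16-byte SG entries, so sg_count = 10
 * gives calls at i = 0, 4 and 8, clearing entries 0-11 in three calls
 * rather than a 160-byte memset.
 */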
83 ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
84 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
89 static struct dpaa_sec_job *
90 build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
91 struct rte_crypto_sgl *sgl,
92 struct rte_crypto_sgl *dest_sgl,
93 struct rte_crypto_va_iova_ptr *iv,
94 struct rte_crypto_va_iova_ptr *digest,
95 struct rte_crypto_va_iova_ptr *auth_iv,
96 union rte_crypto_sym_ofs ofs,
100 RTE_SET_USED(dest_sgl);
102 RTE_SET_USED(auth_iv);
105 dpaa_sec_session *ses =
106 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
107 struct dpaa_sec_job *cf;
108 struct dpaa_sec_op_ctx *ctx;
109 struct qm_sg_entry *sg, *out_sg, *in_sg;
110 phys_addr_t start_addr;
111 uint8_t *old_digest, extra_segs;
112 int data_len, data_offset, total_len = 0;
115 for (i = 0; i < sgl->num; i++)
116 total_len += sgl->vec[i].len;
118 data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
119 data_offset = ofs.ofs.auth.head;
121 /* Support lengths in bits only for SNOW3G and ZUC */
128 if (sgl->num > MAX_SG_ENTRIES) {
129 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
133 ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
138 ctx->userdata = (void *)userdata;
139 old_digest = ctx->digest;
143 qm_sg_entry_set64(out_sg, digest->iova);
144 out_sg->length = ses->digest_length;
145 cpu_to_hw_sg(out_sg);
149 /* need to extend the input to a compound frame */
150 in_sg->extension = 1;
152 in_sg->length = data_len;
153 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
158 if (ses->iv.length) {
161 iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
164 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
165 iv_ptr = conv_to_snow_f9_iv(iv_ptr);
167 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
168 iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
171 sg->length = ses->iv.length;
173 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
174 in_sg->length += sg->length;
179 qm_sg_entry_set64(sg, sgl->vec[0].iova);
180 sg->offset = data_offset;
182 if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
183 sg->length = data_len;
185 sg->length = sgl->vec[0].len - data_offset;
187 /* remaining i/p segs */
188 for (i = 1; i < sgl->num; i++) {
191 qm_sg_entry_set64(sg, sgl->vec[i].iova);
192 if (data_len > (int)sgl->vec[i].len)
193 sg->length = sgl->vec[i].len;
195 sg->length = data_len;
197 data_len = data_len - sg->length;
203 if (is_decode(ses)) {
204 /* Digest verification case */
207 rte_memcpy(old_digest, digest->va,
209 start_addr = rte_dpaa_mem_vtop(old_digest);
210 qm_sg_entry_set64(sg, start_addr);
211 sg->length = ses->digest_length;
212 in_sg->length += ses->digest_length;
221 static inline struct dpaa_sec_job *
222 build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
223 struct rte_crypto_sgl *sgl,
224 struct rte_crypto_sgl *dest_sgl,
225 struct rte_crypto_va_iova_ptr *iv,
226 struct rte_crypto_va_iova_ptr *digest,
227 struct rte_crypto_va_iova_ptr *auth_iv,
228 union rte_crypto_sym_ofs ofs,
232 dpaa_sec_session *ses =
233 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
234 struct dpaa_sec_job *cf;
235 struct dpaa_sec_op_ctx *ctx;
236 struct qm_sg_entry *sg, *out_sg, *in_sg;
237 uint8_t extra_req_segs;
238 uint8_t *IV_ptr = iv->va;
239 int data_len = 0, aead_len = 0;
242 for (i = 0; i < sgl->num; i++)
243 data_len += sgl->vec[i].len;
246 aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
248 if (ses->auth_only_len)
251 if (sgl->num > MAX_SG_ENTRIES) {
252 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
257 ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_req_segs);
262 ctx->userdata = (void *)userdata;
264 rte_prefetch0(cf->sg);
268 out_sg->extension = 1;
270 out_sg->length = aead_len + ses->digest_length;
272 out_sg->length = aead_len;
274 /* output sg entries */
276 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
277 cpu_to_hw_sg(out_sg);
281 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
282 sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
283 sg->offset = ofs.ofs.cipher.head;
285 /* Successive segs */
286 for (i = 1; i < dest_sgl->num; i++) {
289 qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
290 sg->length = dest_sgl->vec[i].len;
294 qm_sg_entry_set64(sg, sgl->vec[0].iova);
295 sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
296 sg->offset = ofs.ofs.cipher.head;
298 /* Successive segs */
299 for (i = 1; i < sgl->num; i++) {
302 qm_sg_entry_set64(sg, sgl->vec[i].iova);
303 sg->length = sgl->vec[i].len;
308 if (is_encode(ses)) {
310 /* set auth output */
312 qm_sg_entry_set64(sg, digest->iova);
313 sg->length = ses->digest_length;
320 in_sg->extension = 1;
323 in_sg->length = ses->iv.length + aead_len
324 + ses->auth_only_len;
326 in_sg->length = ses->iv.length + aead_len
327 + ses->auth_only_len + ses->digest_length;
329 /* input sg entries */
331 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
335 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
336 sg->length = ses->iv.length;
339 /* 2nd seg: auth-only data */
340 if (ses->auth_only_len) {
342 qm_sg_entry_set64(sg, auth_iv->iova);
343 sg->length = ses->auth_only_len;
349 qm_sg_entry_set64(sg, sgl->vec[0].iova);
350 sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
351 sg->offset = ofs.ofs.cipher.head;
353 /* Successive segs */
354 for (i = 1; i < sgl->num; i++) {
357 qm_sg_entry_set64(sg, sgl->vec[i].iova);
358 sg->length = sgl->vec[i].len;
361 if (is_decode(ses)) {
364 memcpy(ctx->digest, digest->va,
366 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
367 sg->length = ses->digest_length;
372 if (ses->auth_only_len)
373 fd->cmd = 0x80000000 | ses->auth_only_len;
378 static inline struct dpaa_sec_job *
379 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
380 struct rte_crypto_sgl *sgl,
381 struct rte_crypto_sgl *dest_sgl,
382 struct rte_crypto_va_iova_ptr *iv,
383 struct rte_crypto_va_iova_ptr *digest,
384 struct rte_crypto_va_iova_ptr *auth_iv,
385 union rte_crypto_sym_ofs ofs,
389 RTE_SET_USED(auth_iv);
391 dpaa_sec_session *ses =
392 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
393 struct dpaa_sec_job *cf;
394 struct dpaa_sec_op_ctx *ctx;
395 struct qm_sg_entry *sg, *out_sg, *in_sg;
396 uint8_t *IV_ptr = iv->va;
398 uint16_t auth_hdr_len = ofs.ofs.cipher.head -
400 uint16_t auth_tail_len;
401 uint32_t auth_only_len;
402 int data_len = 0, auth_len = 0, cipher_len = 0;
404 for (i = 0; i < sgl->num; i++)
405 data_len += sgl->vec[i].len;
407 cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
408 auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
409 auth_tail_len = auth_len - cipher_len - auth_hdr_len;
410 auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
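/* Worked example (hypothetical values): on a 100-byte buffer with
 * ofs.ofs.auth.head = 0, ofs.ofs.cipher.head = 16, ofs.ofs.auth.tail = 0
 * and ofs.ofs.cipher.tail = 4: cipher_len = 80, auth_len = 100,
 * auth_hdr_len = 16, auth_tail_len = 4, and auth_only_len packs to
 * (4 << 16) | 16 = 0x00040010.
 */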
412 if (sgl->num > MAX_SG_ENTRIES) {
413 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
418 ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
423 ctx->userdata = (void *)userdata;
425 rte_prefetch0(cf->sg);
429 out_sg->extension = 1;
431 out_sg->length = cipher_len + ses->digest_length;
433 out_sg->length = cipher_len;
435 /* output sg entries */
437 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
438 cpu_to_hw_sg(out_sg);
442 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
443 sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
444 sg->offset = ofs.ofs.cipher.head;
446 /* Successive segs */
447 for (i = 1; i < dest_sgl->num; i++) {
450 qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
451 sg->length = dest_sgl->vec[i].len;
453 sg->length -= ofs.ofs.cipher.tail;
455 qm_sg_entry_set64(sg, sgl->vec[0].iova);
456 sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
457 sg->offset = ofs.ofs.cipher.head;
459 /* Successive segs */
460 for (i = 1; i < sgl->num; i++) {
463 qm_sg_entry_set64(sg, sgl->vec[i].iova);
464 sg->length = sgl->vec[i].len;
466 sg->length -= ofs.ofs.cipher.tail;
469 if (is_encode(ses)) {
471 /* set auth output */
473 qm_sg_entry_set64(sg, digest->iova);
474 sg->length = ses->digest_length;
481 in_sg->extension = 1;
484 in_sg->length = ses->iv.length + auth_len;
486 in_sg->length = ses->iv.length + auth_len
487 + ses->digest_length;
489 /* input sg entries */
491 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
495 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
496 sg->length = ses->iv.length;
501 qm_sg_entry_set64(sg, sgl->vec[0].iova);
502 sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
503 sg->offset = ofs.ofs.auth.head;
505 /* Successive segs */
506 for (i = 1; i < sgl->num; i++) {
509 qm_sg_entry_set64(sg, sgl->vec[i].iova);
510 sg->length = sgl->vec[i].len;
513 if (is_decode(ses)) {
516 memcpy(ctx->digest, digest->va,
518 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
519 sg->length = ses->digest_length;
525 fd->cmd = 0x80000000 | auth_only_len;
530 static struct dpaa_sec_job *
531 build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
532 struct rte_crypto_sgl *sgl,
533 struct rte_crypto_sgl *dest_sgl,
534 struct rte_crypto_va_iova_ptr *iv,
535 struct rte_crypto_va_iova_ptr *digest,
536 struct rte_crypto_va_iova_ptr *auth_iv,
537 union rte_crypto_sym_ofs ofs,
541 RTE_SET_USED(digest);
542 RTE_SET_USED(auth_iv);
545 dpaa_sec_session *ses =
546 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
547 struct dpaa_sec_job *cf;
548 struct dpaa_sec_op_ctx *ctx;
549 struct qm_sg_entry *sg, *out_sg, *in_sg;
551 uint8_t *IV_ptr = iv->va;
552 int data_len, total_len = 0, data_offset;
554 for (i = 0; i < sgl->num; i++)
555 total_len += sgl->vec[i].len;
557 data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
558 data_offset = ofs.ofs.cipher.head;
560 /* Support lengths in bits only for SNOW3G and ZUC */
561 if (sgl->num > MAX_SG_ENTRIES) {
562 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
567 ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
572 ctx->userdata = (void *)userdata;
576 out_sg->extension = 1;
577 out_sg->length = data_len;
578 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
579 cpu_to_hw_sg(out_sg);
584 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
585 sg->length = dest_sgl->vec[0].len - data_offset;
586 sg->offset = data_offset;
588 /* Successive segs */
589 for (i = 1; i < dest_sgl->num; i++) {
592 qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
593 sg->length = dest_sgl->vec[i].len;
598 qm_sg_entry_set64(sg, sgl->vec[0].iova);
599 sg->length = sgl->vec[0].len - data_offset;
600 sg->offset = data_offset;
602 /* Successive segs */
603 for (i = 1; i < sgl->num; i++) {
606 qm_sg_entry_set64(sg, sgl->vec[i].iova);
607 sg->length = sgl->vec[i].len;
616 in_sg->extension = 1;
618 in_sg->length = data_len + ses->iv.length;
621 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
625 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
626 sg->length = ses->iv.length;
631 qm_sg_entry_set64(sg, sgl->vec[0].iova);
632 sg->length = sgl->vec[0].len - data_offset;
633 sg->offset = data_offset;
635 /* Successive segs */
636 for (i = 1; i < sgl->num; i++) {
639 qm_sg_entry_set64(sg, sgl->vec[i].iova);
640 sg->length = sgl->vec[i].len;
648 #ifdef RTE_LIB_SECURITY
649 static inline struct dpaa_sec_job *
650 build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
651 struct rte_crypto_sgl *sgl,
652 struct rte_crypto_sgl *dest_sgl,
653 struct rte_crypto_va_iova_ptr *iv,
654 struct rte_crypto_va_iova_ptr *digest,
655 struct rte_crypto_va_iova_ptr *auth_iv,
656 union rte_crypto_sym_ofs ofs,
661 RTE_SET_USED(digest);
662 RTE_SET_USED(auth_iv);
665 dpaa_sec_session *ses =
666 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
667 struct dpaa_sec_job *cf;
668 struct dpaa_sec_op_ctx *ctx;
669 struct qm_sg_entry *sg, *out_sg, *in_sg;
670 uint32_t in_len = 0, out_len = 0;
673 if (sgl->num > MAX_SG_ENTRIES) {
674 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
679 ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
683 ctx->userdata = (void *)userdata;
686 out_sg->extension = 1;
687 qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
692 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
694 sg->length = dest_sgl->vec[0].len;
695 out_len += sg->length;
697 for (i = 1; i < dest_sgl->num; i++) {
698 /* Successive segs */
701 qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
703 sg->length = dest_sgl->vec[i].len;
704 out_len += sg->length;
706 sg->length = dest_sgl->vec[i - 1].tot_len;
710 qm_sg_entry_set64(sg, sgl->vec[0].iova);
712 sg->length = sgl->vec[0].len;
713 out_len += sg->length;
715 for (i = 1; i < sgl->num; i++) {
716 /* Successive segs */
719 qm_sg_entry_set64(sg, sgl->vec[i].iova);
721 sg->length = sgl->vec[i].len;
722 out_len += sg->length;
724 sg->length = sgl->vec[i - 1].tot_len;
727 out_len += sg->length;
731 out_sg->length = out_len;
732 cpu_to_hw_sg(out_sg);
736 in_sg->extension = 1;
738 in_len = sgl->vec[0].len;
741 qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
744 qm_sg_entry_set64(sg, sgl->vec[0].iova);
745 sg->length = sgl->vec[0].len;
748 /* Successive segs */
749 for (i = 1; i < sgl->num; i++) {
752 qm_sg_entry_set64(sg, sgl->vec[i].iova);
753 sg->length = sgl->vec[i].len;
755 in_len += sg->length;
760 in_sg->length = in_len;
763 if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
764 fd->cmd = 0x80000000 |
765 *((uint32_t *)((uint8_t *)userdata +
766 ses->pdcp.hfn_ovd_offset));
767 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
768 *((uint32_t *)((uint8_t *)userdata +
769 ses->pdcp.hfn_ovd_offset)),
778 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
779 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
780 void *user_data[], int *status)
782 /* Transmit the frames to the given device and queue pair */
784 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
786 struct qm_fd fds[DPAA_SEC_BURST], *fd;
787 uint32_t frames_to_send;
788 struct dpaa_sec_job *cf;
789 dpaa_sec_session *ses =
790 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
791 uint32_t flags[DPAA_SEC_BURST] = {0};
792 struct qman_fq *inq[DPAA_SEC_BURST];
794 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
795 if (rte_dpaa_portal_init((void *)0)) {
796 DPAA_SEC_ERR("Failure in affining portal");
802 frames_to_send = (vec->num > DPAA_SEC_BURST) ?
803 DPAA_SEC_BURST : vec->num;
804 for (loop = 0; loop < frames_to_send; loop++) {
805 if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
806 if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
807 frames_to_send = loop;
810 } else if (unlikely(ses->qp[rte_lcore_id() %
811 MAX_DPAA_CORES] != dpaa_qp)) {
812 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
814 ses->qp[rte_lcore_id() %
815 MAX_DPAA_CORES], dpaa_qp);
816 frames_to_send = loop;
820 /* Clear the unused FD fields before sending */
822 memset(fd, 0, sizeof(struct qm_fd));
823 cf = ses->build_raw_dp_fd(drv_ctx,
825 &vec->dest_sgl[loop],
833 DPAA_SEC_ERR("error: Improper packet contents"
834 " for crypto operation");
837 inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
838 qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
839 fd->_format1 = qm_fd_compound;
840 fd->length29 = 2 * sizeof(struct qm_sg_entry);
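/* The FD built above is a compound frame descriptor: it points at
 * cf->sg, where sg[0] describes the output and sg[1] the input, as
 * filled in by the session's build_raw_dp_fd() callback.
 */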
846 while (loop < frames_to_send) {
847 loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
848 &flags[loop], frames_to_send - loop);
850 vec->num -= frames_to_send;
851 num_tx += frames_to_send;
855 dpaa_qp->tx_pkts += num_tx;
856 dpaa_qp->tx_errs += vec->num - num_tx;
862 dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
863 uint8_t is_user_data_array,
864 rte_cryptodev_raw_post_dequeue_t post_dequeue,
868 unsigned int pkts = 0;
869 int num_rx_bufs, ret;
870 struct qm_dqrr_entry *dq;
871 uint32_t vdqcr_flags = 0;
872 uint8_t is_success = 0;
876 * For requests of up to four buffers we provide the exact number of
877 * buffers, i.e. we set the QM_VDQCR_EXACT flag; otherwise we do not.
878 * Without QM_VDQCR_EXACT the portal can return up to two more buffers
879 * than requested, so we request two fewer in that case.
882 vdqcr_flags = QM_VDQCR_EXACT;
883 num_rx_bufs = nb_ops;
885 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
886 (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
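/* Example (illustrative numbers): nb_ops = 3 requests exactly 3 frames
 * with QM_VDQCR_EXACT; nb_ops = 32 (and not above
 * DPAA_MAX_DEQUEUE_NUM_FRAMES) requests 30 frames without the flag, so
 * the portal may return up to 32.
 */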
888 ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
893 const struct qm_fd *fd;
894 struct dpaa_sec_job *job;
895 struct dpaa_sec_op_ctx *ctx;
897 dq = qman_dequeue(fq);
902 /* sg is embedded in an op ctx,
903 * sg[0] is for output
906 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
908 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
909 ctx->fd_status = fd->status;
910 if (is_user_data_array)
911 out_user_data[pkts] = ctx->userdata;
913 out_user_data[0] = ctx->userdata;
915 if (!ctx->fd_status) {
919 DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
921 post_dequeue(ctx->op, pkts, is_success);
924 /* op status already reported via post_dequeue(); free the ctx memory */
925 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
927 qman_dqrr_consume(fq, dq);
928 } while (fq->flags & QMAN_FQ_STATE_VDQCR);
934 static __rte_always_inline uint32_t
935 dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
936 rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
937 uint32_t max_nb_to_dequeue,
938 rte_cryptodev_raw_post_dequeue_t post_dequeue,
939 void **out_user_data, uint8_t is_user_data_array,
940 uint32_t *n_success, int *dequeue_status)
942 RTE_SET_USED(drv_ctx);
943 RTE_SET_USED(get_dequeue_count);
945 struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
946 uint32_t nb_ops = max_nb_to_dequeue;
948 if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
949 if (rte_dpaa_portal_init((void *)0)) {
950 DPAA_SEC_ERR("Failure in affining portal");
955 num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
956 is_user_data_array, post_dequeue, nb_ops);
958 dpaa_qp->rx_pkts += num_rx;
962 DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
967 static __rte_always_inline int
968 dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
969 struct rte_crypto_vec *data_vec,
970 uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
971 struct rte_crypto_va_iova_ptr *iv,
972 struct rte_crypto_va_iova_ptr *digest,
973 struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
976 RTE_SET_USED(qp_data);
977 RTE_SET_USED(drv_ctx);
978 RTE_SET_USED(data_vec);
979 RTE_SET_USED(n_data_vecs);
982 RTE_SET_USED(digest);
983 RTE_SET_USED(aad_or_auth_iv);
984 RTE_SET_USED(user_data);
989 static __rte_always_inline void *
990 dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
991 enum rte_crypto_op_status *op_status)
993 RTE_SET_USED(qp_data);
994 RTE_SET_USED(drv_ctx);
995 RTE_SET_USED(dequeue_status);
996 RTE_SET_USED(op_status);
1002 dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
1003 struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
1004 enum rte_crypto_op_sess_type sess_type,
1005 union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
1007 dpaa_sec_session *sess;
1008 struct dpaa_sec_raw_dp_ctx *dp_ctx;
1009 RTE_SET_USED(qp_id);
1012 memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
1013 raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
1016 if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1017 sess = (dpaa_sec_session *)get_sec_session_private_data(
1018 session_ctx.sec_sess);
1019 else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1020 sess = (dpaa_sec_session *)get_sym_session_private_data(
1021 session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
1024 raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
1025 raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
1026 raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
1027 raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
1028 raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
1029 raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
1031 if (sess->ctxt == DPAA_SEC_CIPHER)
1032 sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
1033 else if (sess->ctxt == DPAA_SEC_AUTH)
1034 sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
1035 else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
1036 sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
1037 else if (sess->ctxt == DPAA_SEC_AEAD)
1038 sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
1039 #ifdef RTE_LIB_SECURITY
1040 else if (sess->ctxt == DPAA_SEC_IPSEC ||
1041 sess->ctxt == DPAA_SEC_PDCP)
1042 sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
1046 dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
1047 dp_ctx->session = sess;
1053 dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
1055 return sizeof(struct dpaa_sec_raw_dp_ctx);
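/*
 * The hooks registered above are consumed through the generic cryptodev
 * raw data-path API. A minimal usage sketch, not driver code: dev_id,
 * qp_id, sess, vec, ofs, user_data, out_user_data and post_dequeue_cb
 * are assumed to be set up by the application, and error handling is
 * omitted. Note that this PMD ignores get_dequeue_count and relies on
 * max_nb_to_dequeue, and its enqueue_done/dequeue_done callbacks are
 * no-ops.
 *
 *	int ctx_sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *raw_ctx = rte_zmalloc(NULL, ctx_sz, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int enq_status, deq_status;
 *	uint32_t n_success = 0;
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	uint32_t n_enq = rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
 *			user_data, &enq_status);
 *	rte_cryptodev_raw_enqueue_done(raw_ctx, n_enq);
 *
 *	uint32_t n_deq = rte_cryptodev_raw_dequeue_burst(raw_ctx, NULL, n_enq,
 *			post_dequeue_cb, out_user_data, 1, &n_success,
 *			&deq_status);
 *	rte_cryptodev_raw_dequeue_done(raw_ctx, n_deq);
 */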