crypto/dpaa_sec: support authonly and chain with raw API
[dpdk.git] drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c
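This file implements the cryptodev raw data-path (raw DP) service for the DPAA_SEC PMD, adding auth-only and cipher+auth (chain) frame-descriptor builders alongside the cipher-only one. Below is a minimal, hedged sketch of how an application drives these entry points through the generic rte_cryptodev raw DP API; dev_id, qp_id, the pre-created symmetric session and the helper names (raw_dp_roundtrip, post_deq_cb) are illustrative placeholders, error handling is trimmed, and a real application would poll the dequeue until all enqueued frames return.

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* No-op completion callback; index/status handling is application specific. */
static void
post_deq_cb(void *user_data, uint32_t index, uint8_t is_op_success)
{
	RTE_SET_USED(user_data);
	RTE_SET_USED(index);
	RTE_SET_USED(is_op_success);
}

/* One enqueue/dequeue round trip over an already prepared rte_crypto_sym_vec. */
static int
raw_dp_roundtrip(uint8_t dev_id, uint16_t qp_id, void *sym_sess,
		 struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
		 void **user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sym_sess };
	struct rte_crypto_raw_dp_ctx *dp_ctx;
	uint32_t n_enq, n_deq, n_success = 0;
	int enq_status = 0, deq_status = 0;
	int ctx_size;

	/* Size includes the driver private area (dpaa_sec_get_dp_ctx_size below). */
	ctx_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (ctx_size < 0)
		return ctx_size;
	dp_ctx = rte_zmalloc(NULL, ctx_size, 0);
	if (dp_ctx == NULL)
		return -ENOMEM;

	/* Ends up in dpaa_sec_configure_raw_dp_ctx(), which picks the FD builder. */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp_ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		rte_free(dp_ctx);
		return -EINVAL;
	}

	n_enq = rte_cryptodev_raw_enqueue_burst(dp_ctx, vec, ofs, user_data,
						&enq_status);
	rte_cryptodev_raw_enqueue_done(dp_ctx, n_enq);

	n_deq = rte_cryptodev_raw_dequeue_burst(dp_ctx, NULL, n_enq,
						post_deq_cb, user_data, 1,
						&n_success, &deq_status);
	rte_cryptodev_raw_dequeue_done(dp_ctx, n_deq);

	rte_free(dp_ctx);
	return (int)n_success;
}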
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2021 NXP
3  */
4
5 #include <rte_byteorder.h>
6 #include <rte_common.h>
7 #include <cryptodev_pmd.h>
8 #include <rte_crypto.h>
9 #include <rte_cryptodev.h>
10 #ifdef RTE_LIB_SECURITY
11 #include <rte_security_driver.h>
12 #endif
13
14 /* RTA header files */
15 #include <desc/algo.h>
16 #include <desc/ipsec.h>
17
18 #include <rte_dpaa_bus.h>
19 #include <dpaa_sec.h>
20 #include <dpaa_sec_log.h>
21
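/*
 * Driver-private part of rte_crypto_raw_dp_ctx; it lives in the context's
 * drv_ctx_data[] area (see dpaa_sec_configure_raw_dp_ctx() and
 * dpaa_sec_get_dp_ctx_size() at the end of this file). Only the session
 * pointer is consumed in this file; the ring indices and cached counters are
 * kept for layout parity with other raw data-path contexts and are unused here.
 */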
22 struct dpaa_sec_raw_dp_ctx {
23         dpaa_sec_session *session;
24         uint32_t tail;
25         uint32_t head;
26         uint16_t cached_enqueue;
27         uint16_t cached_dequeue;
28 };
29
30 static inline int
31 is_encode(dpaa_sec_session *ses)
32 {
33         return ses->dir == DIR_ENC;
34 }
35
36 static inline int is_decode(dpaa_sec_session *ses)
37 {
38         return ses->dir == DIR_DEC;
39 }
40
41 static __rte_always_inline int
42 dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
43 {
44         RTE_SET_USED(qp_data);
45         RTE_SET_USED(drv_ctx);
46         RTE_SET_USED(n);
47
48         return 0;
49 }
50
51 static __rte_always_inline int
52 dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
53 {
54         RTE_SET_USED(qp_data);
55         RTE_SET_USED(drv_ctx);
56         RTE_SET_USED(n);
57
58         return 0;
59 }
60
61 static inline struct dpaa_sec_op_ctx *
62 dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
63 {
64         struct dpaa_sec_op_ctx *ctx;
65         int i, retval;
66
67         retval = rte_mempool_get(
68                         ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
69                         (void **)(&ctx));
70         if (retval || !ctx) {
71                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
72                 return NULL;
73         }
74         /*
75          * Clear the SG memory. There are 16 SG entries of 16 bytes each;
76          * one call to dcbz_64() clears 64 bytes, so at most 4 calls clear
77          * all the SG entries. This function runs for every packet, and
78          * dcbz_64() is cheaper than a memset() here.
79          */
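        /*
         * Worked example: for sg_count = 10 the loop below runs with i = 0, 4
         * and 8; each dcbz_64() zeroes one 64-byte line (four 16-byte SG
         * entries), so entries 0-11 are cleared, covering all 10 in use.
         */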
80         for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
81                 dcbz_64(&ctx->job.sg[i]);
82
83         ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
84         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
85
86         return ctx;
87 }
88
89 static struct dpaa_sec_job *
90 build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
91                         struct rte_crypto_sgl *sgl,
92                         struct rte_crypto_sgl *dest_sgl,
93                         struct rte_crypto_va_iova_ptr *iv,
94                         struct rte_crypto_va_iova_ptr *digest,
95                         struct rte_crypto_va_iova_ptr *auth_iv,
96                         union rte_crypto_sym_ofs ofs,
97                         void *userdata,
98                         struct qm_fd *fd)
99 {
100         RTE_SET_USED(dest_sgl);
101         RTE_SET_USED(iv);
102         RTE_SET_USED(auth_iv);
103         RTE_SET_USED(fd);
104
105         dpaa_sec_session *ses =
106                 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
107         struct dpaa_sec_job *cf;
108         struct dpaa_sec_op_ctx *ctx;
109         struct qm_sg_entry *sg, *out_sg, *in_sg;
110         phys_addr_t start_addr;
111         uint8_t *old_digest, extra_segs;
112         int data_len, data_offset, total_len = 0;
113         unsigned int i;
114
115         for (i = 0; i < sgl->num; i++)
116                 total_len += sgl->vec[i].len;
117
118         data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
119         data_offset = ofs.ofs.auth.head;
120
121         /* Lengths in bits are supported only for SNOW3G and ZUC */
122
123         if (is_decode(ses))
124                 extra_segs = 3;
125         else
126                 extra_segs = 2;
127
128         if (sgl->num > MAX_SG_ENTRIES) {
129                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
130                                 MAX_SG_ENTRIES);
131                 return NULL;
132         }
133         ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
134         if (!ctx)
135                 return NULL;
136
137         cf = &ctx->job;
138         ctx->userdata = (void *)userdata;
139         old_digest = ctx->digest;
140
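        /*
         * Compound frame layout built below (sketch): sg[0] is the output
         * entry and points at the digest buffer; sg[1] is the input entry, an
         * extension pointing at sg[2..]: an optional per-session IV (SNOW3G
         * f9 / ZUC EIA3), the authenticated data from sgl and, for
         * verification (decode), a copy of the received digest.
         */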
141         /* output */
142         out_sg = &cf->sg[0];
143         qm_sg_entry_set64(out_sg, digest->iova);
144         out_sg->length = ses->digest_length;
145         cpu_to_hw_sg(out_sg);
146
147         /* input */
148         in_sg = &cf->sg[1];
149         /* need to extend the input to a compound frame */
150         in_sg->extension = 1;
151         in_sg->final = 1;
152         in_sg->length = data_len;
153         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
154
155         /* 1st seg */
156         sg = in_sg + 1;
157
158         if (ses->iv.length) {
159                 uint8_t *iv_ptr;
160
161                 iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
162                                                    ses->iv.offset);
163
164                 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
165                         iv_ptr = conv_to_snow_f9_iv(iv_ptr);
166                         sg->length = 12;
167                 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
168                         iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
169                         sg->length = 8;
170                 } else {
171                         sg->length = ses->iv.length;
172                 }
173                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
174                 in_sg->length += sg->length;
175                 cpu_to_hw_sg(sg);
176                 sg++;
177         }
178
179         qm_sg_entry_set64(sg, sgl->vec[0].iova);
180         sg->offset = data_offset;
181
182         if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
183                 sg->length = data_len;
184         } else {
185                 sg->length = sgl->vec[0].len - data_offset;
186
187                 /* remaining i/p segs */
188                 for (i = 1; i < sgl->num; i++) {
189                         cpu_to_hw_sg(sg);
190                         sg++;
191                         qm_sg_entry_set64(sg, sgl->vec[i].iova);
192                         if (data_len > (int)sgl->vec[i].len)
193                                 sg->length = sgl->vec[i].len;
194                         else
195                                 sg->length = data_len;
196
197                         data_len = data_len - sg->length;
198                         if (data_len < 1)
199                                 break;
200                 }
201         }
202
203         if (is_decode(ses)) {
204                 /* Digest verification case */
205                 cpu_to_hw_sg(sg);
206                 sg++;
207                 rte_memcpy(old_digest, digest->va,
208                                 ses->digest_length);
209                 start_addr = rte_dpaa_mem_vtop(old_digest);
210                 qm_sg_entry_set64(sg, start_addr);
211                 sg->length = ses->digest_length;
212                 in_sg->length += ses->digest_length;
213         }
214         sg->final = 1;
215         cpu_to_hw_sg(sg);
216         cpu_to_hw_sg(in_sg);
217
218         return cf;
219 }
220
221 static inline struct dpaa_sec_job *
222 build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
223                         struct rte_crypto_sgl *sgl,
224                         struct rte_crypto_sgl *dest_sgl,
225                         struct rte_crypto_va_iova_ptr *iv,
226                         struct rte_crypto_va_iova_ptr *digest,
227                         struct rte_crypto_va_iova_ptr *auth_iv,
228                         union rte_crypto_sym_ofs ofs,
229                         void *userdata,
230                         struct qm_fd *fd)
231 {
232         RTE_SET_USED(auth_iv);
233
234         dpaa_sec_session *ses =
235                 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
236         struct dpaa_sec_job *cf;
237         struct dpaa_sec_op_ctx *ctx;
238         struct qm_sg_entry *sg, *out_sg, *in_sg;
239         uint8_t *IV_ptr = iv->va;
240         unsigned int i;
241         uint16_t auth_hdr_len = ofs.ofs.cipher.head -
242                                 ofs.ofs.auth.head;
243         uint16_t auth_tail_len = ofs.ofs.auth.tail;
244         uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
245         int data_len = 0, auth_len = 0, cipher_len = 0;
246
247         for (i = 0; i < sgl->num; i++)
248                 data_len += sgl->vec[i].len;
249
250         cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
251         auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
252
253         if (sgl->num > MAX_SG_ENTRIES) {
254                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
255                                 MAX_SG_ENTRIES);
256                 return NULL;
257         }
258
259         ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
260         if (!ctx)
261                 return NULL;
262
263         cf = &ctx->job;
264         ctx->userdata = (void *)userdata;
265
266         rte_prefetch0(cf->sg);
267
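        /*
         * Compound frame layout built below (sketch): sg[0] is the output
         * extension covering the cipher output segments (dest_sgl when
         * out-of-place, else sgl) plus the digest on encrypt; sg[1] is the
         * input extension covering the IV, the auth-range segments from sgl
         * and, on decrypt, a copy of the received digest. auth_only_len is
         * handed to the descriptor through fd->cmd at the end.
         */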
268         /* output */
269         out_sg = &cf->sg[0];
270         out_sg->extension = 1;
271         if (is_encode(ses))
272                 out_sg->length = cipher_len + ses->digest_length;
273         else
274                 out_sg->length = cipher_len;
275
276         /* output sg entries */
277         sg = &cf->sg[2];
278         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
279         cpu_to_hw_sg(out_sg);
280
281         /* 1st seg */
282         if (dest_sgl) {
283                 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
284                 sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
285                 sg->offset = ofs.ofs.cipher.head;
286
287                 /* Successive segs */
288                 for (i = 1; i < dest_sgl->num; i++) {
289                         cpu_to_hw_sg(sg);
290                         sg++;
291                         qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
292                         sg->length = dest_sgl->vec[i].len;
293                 }
294         } else {
295                 qm_sg_entry_set64(sg, sgl->vec[0].iova);
296                 sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
297                 sg->offset = ofs.ofs.cipher.head;
298
299                 /* Successive segs */
300                 for (i = 1; i < sgl->num; i++) {
301                         cpu_to_hw_sg(sg);
302                         sg++;
303                         qm_sg_entry_set64(sg, sgl->vec[i].iova);
304                         sg->length = sgl->vec[i].len;
305                 }
306         }
307
308         if (is_encode(ses)) {
309                 cpu_to_hw_sg(sg);
310                 /* set auth output */
311                 sg++;
312                 qm_sg_entry_set64(sg, digest->iova);
313                 sg->length = ses->digest_length;
314         }
315         sg->final = 1;
316         cpu_to_hw_sg(sg);
317
318         /* input */
319         in_sg = &cf->sg[1];
320         in_sg->extension = 1;
321         in_sg->final = 1;
322         if (is_encode(ses))
323                 in_sg->length = ses->iv.length + auth_len;
324         else
325                 in_sg->length = ses->iv.length + auth_len
326                                                 + ses->digest_length;
327
328         /* input sg entries */
329         sg++;
330         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
331         cpu_to_hw_sg(in_sg);
332
333         /* 1st seg IV */
334         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
335         sg->length = ses->iv.length;
336         cpu_to_hw_sg(sg);
337
338         /* 2nd seg */
339         sg++;
340         qm_sg_entry_set64(sg, sgl->vec[0].iova);
341         sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
342         sg->offset = ofs.ofs.auth.head;
343
344         /* Successive segs */
345         for (i = 1; i < sgl->num; i++) {
346                 cpu_to_hw_sg(sg);
347                 sg++;
348                 qm_sg_entry_set64(sg, sgl->vec[i].iova);
349                 sg->length = sgl->vec[i].len;
350         }
351
352         if (is_decode(ses)) {
353                 cpu_to_hw_sg(sg);
354                 sg++;
355                 memcpy(ctx->digest, digest->va,
356                         ses->digest_length);
357                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
358                 sg->length = ses->digest_length;
359         }
360         sg->final = 1;
361         cpu_to_hw_sg(sg);
362
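        /*
         * Example with assumed offsets: auth.head = 14, cipher.head = 30 and
         * auth.tail = 0 give auth_hdr_len = 16 and auth_only_len = 0x10, so
         * fd->cmd becomes 0x80000010 (bit 31 marks the auth-only length as
         * valid for the descriptor).
         */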
363         if (auth_only_len)
364                 fd->cmd = 0x80000000 | auth_only_len;
365
366         return cf;
367 }
368
369 static struct dpaa_sec_job *
370 build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
371                         struct rte_crypto_sgl *sgl,
372                         struct rte_crypto_sgl *dest_sgl,
373                         struct rte_crypto_va_iova_ptr *iv,
374                         struct rte_crypto_va_iova_ptr *digest,
375                         struct rte_crypto_va_iova_ptr *auth_iv,
376                         union rte_crypto_sym_ofs ofs,
377                         void *userdata,
378                         struct qm_fd *fd)
379 {
380         RTE_SET_USED(digest);
381         RTE_SET_USED(auth_iv);
382         RTE_SET_USED(fd);
383
384         dpaa_sec_session *ses =
385                 ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
386         struct dpaa_sec_job *cf;
387         struct dpaa_sec_op_ctx *ctx;
388         struct qm_sg_entry *sg, *out_sg, *in_sg;
389         unsigned int i;
390         uint8_t *IV_ptr = iv->va;
391         int data_len, total_len = 0, data_offset;
392
393         for (i = 0; i < sgl->num; i++)
394                 total_len += sgl->vec[i].len;
395
396         data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
397         data_offset = ofs.ofs.cipher.head;
398
399         /* Support lengths in bits only for SNOW3G and ZUC */
400         if (sgl->num > MAX_SG_ENTRIES) {
401                 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
402                                 MAX_SG_ENTRIES);
403                 return NULL;
404         }
405
406         ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
407         if (!ctx)
408                 return NULL;
409
410         cf = &ctx->job;
411         ctx->userdata = (void *)userdata;
412
413         /* output */
414         out_sg = &cf->sg[0];
415         out_sg->extension = 1;
416         out_sg->length = data_len;
417         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
418         cpu_to_hw_sg(out_sg);
419
420         if (dest_sgl) {
421                 /* 1st seg */
422                 sg = &cf->sg[2];
423                 qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
424                 sg->length = dest_sgl->vec[0].len - data_offset;
425                 sg->offset = data_offset;
426
427                 /* Successive segs */
428                 for (i = 1; i < dest_sgl->num; i++) {
429                         cpu_to_hw_sg(sg);
430                         sg++;
431                         qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
432                         sg->length = dest_sgl->vec[i].len;
433                 }
434         } else {
435                 /* 1st seg */
436                 sg = &cf->sg[2];
437                 qm_sg_entry_set64(sg, sgl->vec[0].iova);
438                 sg->length = sgl->vec[0].len - data_offset;
439                 sg->offset = data_offset;
440
441                 /* Successive segs */
442                 for (i = 1; i < sgl->num; i++) {
443                         cpu_to_hw_sg(sg);
444                         sg++;
445                         qm_sg_entry_set64(sg, sgl->vec[i].iova);
446                         sg->length = sgl->vec[i].len;
447                 }
448
449         }
450         sg->final = 1;
451         cpu_to_hw_sg(sg);
452
453         /* input */
454         in_sg = &cf->sg[1];
455         in_sg->extension = 1;
456         in_sg->final = 1;
457         in_sg->length = data_len + ses->iv.length;
458
459         sg++;
460         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
461         cpu_to_hw_sg(in_sg);
462
463         /* IV */
464         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
465         sg->length = ses->iv.length;
466         cpu_to_hw_sg(sg);
467
468         /* 1st seg */
469         sg++;
470         qm_sg_entry_set64(sg, sgl->vec[0].iova);
471         sg->length = sgl->vec[0].len - data_offset;
472         sg->offset = data_offset;
473
474         /* Successive segs */
475         for (i = 1; i < sgl->num; i++) {
476                 cpu_to_hw_sg(sg);
477                 sg++;
478                 qm_sg_entry_set64(sg, sgl->vec[i].iova);
479                 sg->length = sgl->vec[i].len;
480         }
481         sg->final = 1;
482         cpu_to_hw_sg(sg);
483
484         return cf;
485 }
486
487 static uint32_t
488 dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
489         struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
490         void *user_data[], int *status)
491 {
492         /* Transmit the frames to the given device and queue pair */
493         uint32_t loop;
494         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
495         uint16_t num_tx = 0;
496         struct qm_fd fds[DPAA_SEC_BURST], *fd;
497         uint32_t frames_to_send;
498         struct dpaa_sec_job *cf;
499         dpaa_sec_session *ses =
500                         ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
501         uint32_t flags[DPAA_SEC_BURST] = {0};
502         struct qman_fq *inq[DPAA_SEC_BURST];
503
504         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
505                 if (rte_dpaa_portal_init((void *)0)) {
506                         DPAA_SEC_ERR("Failure in affining portal");
507                         return 0;
508                 }
509         }
510
511         while (vec->num) {
512                 frames_to_send = (vec->num > DPAA_SEC_BURST) ?
513                                 DPAA_SEC_BURST : vec->num;
514                 for (loop = 0; loop < frames_to_send; loop++) {
515                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
516                                 if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
517                                         frames_to_send = loop;
518                                         goto send_pkts;
519                                 }
520                         } else if (unlikely(ses->qp[rte_lcore_id() %
521                                                 MAX_DPAA_CORES] != dpaa_qp)) {
522                                 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
523                                         " New qp = %p\n",
524                                         ses->qp[rte_lcore_id() %
525                                         MAX_DPAA_CORES], dpaa_qp);
526                                 frames_to_send = loop;
527                                 goto send_pkts;
528                         }
529
530                         /* Clear the unused FD fields before sending */
531                         fd = &fds[loop];
532                         memset(fd, 0, sizeof(struct qm_fd));
533                         cf = ses->build_raw_dp_fd(drv_ctx,
534                                                 &vec->src_sgl[loop],
535                                                 &vec->dest_sgl[loop],
536                                                 &vec->iv[loop],
537                                                 &vec->digest[loop],
538                                                 &vec->auth_iv[loop],
539                                                 ofs,
540                                                 user_data[loop],
541                                                 fd);
542                         if (!cf) {
543                                 DPAA_SEC_ERR("error: Improper packet contents"
544                                         " for crypto operation");
545                                 goto skip_tx;
546                         }
547                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
548                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
549                         fd->_format1 = qm_fd_compound;
550                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
551
552                         status[loop] = 1;
553                 }
554 send_pkts:
555                 loop = 0;
556                 while (loop < frames_to_send) {
557                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
558                                         &flags[loop], frames_to_send - loop);
559                 }
560                 vec->num -= frames_to_send;
561                 num_tx += frames_to_send;
562         }
563
564 skip_tx:
565         dpaa_qp->tx_pkts += num_tx;
566         dpaa_qp->tx_errs += vec->num - num_tx;
567
568         return num_tx;
569 }
570
571 static int
572 dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
573                 uint8_t is_user_data_array,
574                 rte_cryptodev_raw_post_dequeue_t post_dequeue,
575                 int nb_ops)
576 {
577         struct qman_fq *fq;
578         unsigned int pkts = 0;
579         int num_rx_bufs, ret;
580         struct qm_dqrr_entry *dq;
581         uint32_t vdqcr_flags = 0;
582         uint8_t is_success = 0;
583
584         fq = &qp->outq;
585         /*
586          * For requests of fewer than four buffers we set QM_VDQCR_EXACT and
587          * ask for exactly that many. Otherwise the flag is left clear, and
588          * the portal may return up to two more buffers than requested
589          * (e.g. asking for 30 can yield 32), so we request two fewer.
590          */
591         if (nb_ops < 4) {
592                 vdqcr_flags = QM_VDQCR_EXACT;
593                 num_rx_bufs = nb_ops;
594         } else {
595                 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
596                         (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
597         }
598         ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
599         if (ret)
600                 return 0;
601
602         do {
603                 const struct qm_fd *fd;
604                 struct dpaa_sec_job *job;
605                 struct dpaa_sec_op_ctx *ctx;
606
607                 dq = qman_dequeue(fq);
608                 if (!dq)
609                         continue;
610
611                 fd = &dq->fd;
612                 /* The SG table is embedded in an op ctx:
613                  * sg[0] is for output,
614                  * sg[1] is for input.
615                  */
616                 job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
617
618                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
619                 ctx->fd_status = fd->status;
620                 if (is_user_data_array)
621                         out_user_data[pkts] = ctx->userdata;
622                 else
623                         out_user_data[0] = ctx->userdata;
624
625                 if (!ctx->fd_status) {
626                         is_success = true;
627                 } else {
628                         is_success = false;
629                         DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
630                 }
631                 post_dequeue(ctx->userdata, pkts, is_success);
632                 pkts++;
633
634                 /* status was reported via post_dequeue(); now free the ctx memory */
635                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
636
637                 qman_dqrr_consume(fq, dq);
638         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
639
640         return pkts;
641 }
642
643
644 static __rte_always_inline uint32_t
645 dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
646         rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
647         uint32_t max_nb_to_dequeue,
648         rte_cryptodev_raw_post_dequeue_t post_dequeue,
649         void **out_user_data, uint8_t is_user_data_array,
650         uint32_t *n_success, int *dequeue_status)
651 {
652         RTE_SET_USED(drv_ctx);
653         RTE_SET_USED(get_dequeue_count);
654         uint16_t num_rx;
655         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
656         uint32_t nb_ops = max_nb_to_dequeue;
657
658         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
659                 if (rte_dpaa_portal_init((void *)0)) {
660                         DPAA_SEC_ERR("Failure in affining portal");
661                         return 0;
662                 }
663         }
664
665         num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
666                         is_user_data_array, post_dequeue, nb_ops);
667
668         dpaa_qp->rx_pkts += num_rx;
669         *dequeue_status = 1;
670         *n_success = num_rx;
671
672         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
673
674         return num_rx;
675 }
676
677 static __rte_always_inline int
678 dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
679         struct rte_crypto_vec *data_vec,
680         uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
681         struct rte_crypto_va_iova_ptr *iv,
682         struct rte_crypto_va_iova_ptr *digest,
683         struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
684         void *user_data)
685 {
686         RTE_SET_USED(qp_data);
687         RTE_SET_USED(drv_ctx);
688         RTE_SET_USED(data_vec);
689         RTE_SET_USED(n_data_vecs);
690         RTE_SET_USED(ofs);
691         RTE_SET_USED(iv);
692         RTE_SET_USED(digest);
693         RTE_SET_USED(aad_or_auth_iv);
694         RTE_SET_USED(user_data);
695
696         return 0;
697 }
698
699 static __rte_always_inline void *
700 dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
701         enum rte_crypto_op_status *op_status)
702 {
703         RTE_SET_USED(qp_data);
704         RTE_SET_USED(drv_ctx);
705         RTE_SET_USED(dequeue_status);
706         RTE_SET_USED(op_status);
707
708         return NULL;
709 }
710
711 int
712 dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
713         struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
714         enum rte_crypto_op_sess_type sess_type,
715         union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
716 {
717         dpaa_sec_session *sess;
718         struct dpaa_sec_raw_dp_ctx *dp_ctx;
719         RTE_SET_USED(qp_id);
720
721         if (!is_update) {
722                 memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
723                 raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
724         }
725
726         if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
727                 sess = (dpaa_sec_session *)get_sec_session_private_data(
728                                 session_ctx.sec_sess);
729         else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
730                 sess = (dpaa_sec_session *)get_sym_session_private_data(
731                         session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
732         else
733                 return -ENOTSUP;
734         raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
735         raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
736         raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
737         raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
738         raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
739         raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
740
741         if (sess->ctxt == DPAA_SEC_CIPHER)
742                 sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
743         else if (sess->ctxt == DPAA_SEC_AUTH)
744                 sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
745         else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
746                 sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
747         else
748                 return -ENOTSUP;
749         dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
750         dp_ctx->session = sess;
751
752         return 0;
753 }
754
755 int
756 dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
757 {
758         return sizeof(struct dpaa_sec_raw_dp_ctx);
759 }
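For reference, a sketch of how the two exported functions above are expected to be hooked into the PMD's cryptodev ops table in dpaa_sec.c; the table name is assumed here and only the raw data-path fields are shown.

#include <cryptodev_pmd.h>

static struct rte_cryptodev_ops dpaa_sec_crypto_ops = {
	/* ... existing queue-pair, session and info callbacks ... */
	.sym_get_raw_dp_ctx_size = dpaa_sec_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = dpaa_sec_configure_raw_dp_ctx,
};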