/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

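/* Per-queue-pair raw data-path context. 'session' is the dpaa2_sec session
 * this context was configured with; head/tail and the cached counters are
 * bookkeeping for the raw API ring semantics and are currently unused by
 * the immediate-completion enqueue/dequeue paths in this file.
 */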
struct dpaa2_sec_raw_dp_ctx {
        dpaa2_sec_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

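/* Build a compound frame descriptor for a cipher+auth (chained) session.
 * The output FLE covers the ciphered payload (plus the ICV on encrypt);
 * the input FLE chains the IV and the authenticated payload (plus a copy
 * of the received ICV on decrypt, so SEC can verify it). auth_only_len
 * packs the auth-only tail/head byte counts for the shared descriptor.
 */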
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct ctxt_priv *priv = sess->ctxt;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        int data_len = 0, auth_len = 0, cipher_len = 0;
        unsigned int i = 0;
        uint16_t auth_hdr_len = ofs.ofs.cipher.head -
                                ofs.ofs.auth.head;
        uint16_t auth_tail_len = ofs.ofs.auth.tail;
        uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
        int icv_len = sess->digest_length;
        uint8_t *old_icv;
        uint8_t *iv_ptr = iv->va;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        /* first FLE entry used to store session ctxt */
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        /* Save the shared descriptor */
        flc = &priv->flc_desc[0].flc;

        /* Configure FD as a FRAME LIST */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        /* Configure Output FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_SG_EXT(op_fle);
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        if (auth_only_len)
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

        op_fle->length = (sess->dir == DIR_ENC) ?
                        (cipher_len + icv_len) :
                        cipher_len;

        /* Configure Output SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
        sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_ENC) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, digest->iova);
                sge->length = icv_len;
        }
        DPAA2_SET_FLE_FIN(sge);

        sge++;

        /* Configure Input FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_FIN(ip_fle);

        ip_fle->length = (sess->dir == DIR_ENC) ?
                        (auth_len + sess->iv.length) :
                        (auth_len + sess->iv.length + icv_len);

        /* Configure Input SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
        sge->length = sess->iv.length;

        sge++;
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
        sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_DEC) {
                sge++;
                old_icv = (uint8_t *)(sge + 1);
                memcpy(old_icv, digest->va, icv_len);
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
                sge->length = icv_len;
        }

        DPAA2_SET_FLE_FIN(sge);
        if (auth_only_len) {
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
                DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
        }
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

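/* AEAD FD construction is not implemented for the raw data path yet;
 * this placeholder keeps the dispatch table below complete.
 */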
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(sgl);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);
        RTE_SET_USED(userdata);
        RTE_SET_USED(fd);

        return 0;
}

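/* Build a compound FD for an auth-only session. The output FLE points at
 * the digest buffer; the input FLE chains an optional IV (SNOW3G UIA2 /
 * ZUC EIA3), the data to authenticate and, for verification (DIR_DEC),
 * a copy of the received digest. For SNOW3G/ZUC the length and offset
 * arrive in bits; they must describe whole bytes and are converted to
 * bytes with a >> 3.
 */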
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(iv);
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        int total_len = 0, data_len = 0, data_offset;
        uint8_t *old_digest;
        struct ctxt_priv *priv = sess->ctxt;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        data_offset = ofs.ofs.auth.head;

        if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
                sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
                        return -ENOTSUP;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        /* first FLE entry used to store userdata and session ctxt */
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        flc = &priv->flc_desc[DESC_INITFINAL].flc;

        /* sg FD */
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);

        /* o/p fle */
        DPAA2_SET_FLE_ADDR(op_fle,
                        DPAA2_VADDR_TO_IOVA(digest->va));
        op_fle->length = sess->digest_length;

        /* i/p fle */
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        ip_fle->length = data_len;

        if (sess->iv.length) {
                uint8_t *iv_ptr;

                /* IV is taken from the op (userdata) at sess->iv.offset */
                iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
                                                sess->iv.offset);

                if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sge->length = 12;
                } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sge->length = 8;
                } else {
                        sge->length = sess->iv.length;
                }
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
                ip_fle->length += sge->length;
                sge++;
        }
        /* i/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);

        if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
                sge->length = data_len;
                data_len = 0;
        } else {
                sge->length = sgl->vec[0].len - data_offset;
                for (i = 1; i < sgl->num; i++) {
                        sge++;
                        DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                        DPAA2_SET_FLE_OFFSET(sge, 0);
                        sge->length = sgl->vec[i].len;
                }
        }
        if (sess->dir == DIR_DEC) {
                /* Digest verification case */
                sge++;
                old_digest = (uint8_t *)(sge + 1);
                rte_memcpy(old_digest, digest->va, sess->digest_length);
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
                sge->length = sess->digest_length;
                ip_fle->length += sess->digest_length;
        }
        DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_FIN(ip_fle);
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

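/* Protocol-offload (IPsec) and protocol-compound (PDCP) FD construction
 * are not implemented for the raw data path yet; these placeholders keep
 * the dispatch table below complete.
 */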
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(sgl);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);
        RTE_SET_USED(userdata);
        RTE_SET_USED(fd);

        return 0;
}

static int
build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(sgl);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);
        RTE_SET_USED(userdata);
        RTE_SET_USED(fd);

        return 0;
}

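/* Build a compound FD for a cipher-only session: the input FLE chains the
 * IV followed by the payload; the output FLE covers the ciphered payload.
 * As in the auth path, SNOW3G UEA2 / ZUC EEA3 lengths and offsets arrive
 * in bits and must describe whole bytes.
 */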
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
        int total_len = 0, data_len = 0, data_offset;
        struct sec_flow_context *flc;
        struct ctxt_priv *priv = sess->ctxt;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        data_offset = ofs.ofs.cipher.head;

        if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return -ENOTSUP;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        /* first FLE entry used to store userdata and session ctxt */
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        flc = &priv->flc_desc[0].flc;

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
                data_offset,
                data_len,
                sess->iv.length);

        /* o/p fle */
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
        op_fle->length = data_len;
        DPAA2_SET_FLE_SG_EXT(op_fle);

        /* o/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);
        sge->length = sgl->vec[0].len - data_offset;

        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }
        DPAA2_SET_FLE_FIN(sge);

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
                flc, fle, fle->addr_hi, fle->addr_lo,
                fle->length);

        /* i/p fle */
        sge++;
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        ip_fle->length = sess->iv.length + data_len;
        DPAA2_SET_FLE_SG_EXT(ip_fle);

        /* i/p IV */
        DPAA2_SET_FLE_ADDR(sge, iv->iova);
        DPAA2_SET_FLE_OFFSET(sge, 0);
        sge->length = sess->iv.length;

        sge++;

        /* i/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);
        sge->length = sgl->vec[0].len - data_offset;

        /* i/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }
        DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_FIN(ip_fle);

        /* sg fd */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_LEN(fd, ip_fle->length);
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
                DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));

        return 0;
}

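/* Raw burst enqueue: build one FD per rte_crypto_sym_vec element through
 * the session's build_raw_dp_fd hook, then push the FDs to the TX FQ with
 * qbman_swp_enqueue_multiple(), retrying on EQCR backpressure up to
 * DPAA2_MAX_TX_RETRY_COUNT times.
 */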
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        uint32_t loop;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t frames_to_send, retry_count;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint32_t flags[MAX_TX_RING_SLOTS] = {0};

        if (unlikely(vec->num == 0))
                return 0;

        if (sess == NULL) {
                DPAA2_SEC_ERR("sessionless raw crypto not supported");
                return 0;
        }
        /* Prepare enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

        if (!DPAA2_PER_LCORE_DPIO) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_SEC_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        while (vec->num) {
                frames_to_send = (vec->num > dpaa2_eqcr_size) ?
                        dpaa2_eqcr_size : vec->num;

                for (loop = 0; loop < frames_to_send; loop++) {
                        /* Clear the unused FD fields before sending */
                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
                        ret = sess->build_raw_dp_fd(drv_ctx,
                                                    &vec->src_sgl[loop],
                                                    &vec->iv[loop],
                                                    &vec->digest[loop],
                                                    &vec->auth_iv[loop],
                                                    ofs,
                                                    user_data[loop],
                                                    &fd_arr[loop]);
                        if (ret) {
                                DPAA2_SEC_ERR("error: Improper packet contents"
                                              " for crypto operation");
                                goto skip_tx;
                        }
                        status[loop] = 1;
                }

                loop = 0;
                retry_count = 0;
                while (loop < frames_to_send) {
                        ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
                                                         &fd_arr[loop],
                                                         &flags[loop],
                                                         frames_to_send - loop);
                        if (unlikely(ret < 0)) {
                                retry_count++;
                                if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
                                        num_tx += loop;
                                        vec->num -= loop;
                                        goto skip_tx;
                                }
                        } else {
                                loop += ret;
                                retry_count = 0;
                        }
                }

                num_tx += loop;
                vec->num -= loop;
        }
skip_tx:
        dpaa2_qp->tx_vq.tx_pkts += num_tx;
        dpaa2_qp->tx_vq.err_pkts += vec->num;

        return num_tx;
}

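/* Single-operation enqueue is an unimplemented placeholder; applications
 * are expected to use the burst variant above.
 */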
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data_vec,
        uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
        void *user_data)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(data_vec);
        RTE_SET_USED(n_data_vecs);
        RTE_SET_USED(ofs);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(aad_or_auth_iv);
        RTE_SET_USED(user_data);

        return 0;
}

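/* Recover the caller's userdata from a returned FD: the FD carries the
 * output FLE address, the entry just before it holds the userdata pointer
 * saved by the FD builders, and the whole FLE array is freed here.
 */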
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
        struct qbman_fle *fle;
        void *userdata;

        fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

        DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
                           fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
        userdata = (void *)DPAA2_GET_FLE_ADDR(fle - 1);
        /* free the fle memory */
        rte_free((void *)(fle - 1));

        return userdata;
}

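/* Raw burst dequeue: issue a volatile pull on the RX FQ, wait for QBMAN to
 * complete it, then walk the DQ storage, translating each FD back to the
 * caller's userdata and reporting per-op status through post_dequeue().
 */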
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success, int *dequeue_status)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(get_dequeue_count);

        /* This function receives frames for a given device and VQ */
        struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_qp->rx_vq.fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        void *user_data;
        uint32_t nb_ops = max_nb_to_dequeue;

        if (!DPAA2_PER_LCORE_DPIO) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_SEC_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc,
                                      (nb_ops > dpaa2_dqrr_size) ?
                                      dpaa2_dqrr_size : nb_ops);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                                    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
                                    1);

        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        DPAA2_SEC_WARN(
                                "SEC VDQ command is not issued : QBMAN busy");
                        /* Portal was busy, try again */
                        continue;
                }
                break;
        }

        /* Receive the packets till the Last Dequeue entry is found for
         * the above issued PULL command.
         */
        while (!is_last) {
                /* Check if the previously issued command is completed.
                 * The SWP may be shared between the Ethernet driver
                 * and the SEC driver.
                 */
                while (!qbman_check_command_complete(dq_storage))
                        ;

                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                /* Check whether the Last Pull command has expired and
                 * set the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely(
                                (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
                                DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
                                continue;
                        }
                }

                fd = qbman_result_DQ_fd(dq_storage);
                user_data = sec_fd_to_userdata(fd);
                if (is_user_data_array)
                        out_user_data[num_rx] = user_data;
                else
                        out_user_data[0] = user_data;
                if (unlikely(fd->simple.frc)) {
                        /* TODO Parse SEC errors */
                        DPAA2_SEC_ERR("SEC returned Error - %x",
                                      fd->simple.frc);
                        status = RTE_CRYPTO_OP_STATUS_ERROR;
                } else {
                        status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                }
                post_dequeue(user_data, num_rx, status);

                num_rx++;
                dq_storage++;
        } /* End of Packet Rx loop */

        dpaa2_qp->rx_vq.rx_pkts += num_rx;
        *dequeue_status = 1;
        *n_success = num_rx;

        DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
        /* Return the total number of packets received to DPAA2 app */
        return num_rx;
}

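/* Single-operation dequeue and the enqueue_done/dequeue_done hooks are
 * no-ops: this PMD does not cache operations, so there is no deferred
 * "done" accounting to perform.
 */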
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
                enum rte_crypto_op_status *op_status)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(dequeue_status);
        RTE_SET_USED(op_status);

        return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

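/* Populate the raw data-path context for a queue pair: resolve the session
 * from either a security or a crypto-op session, wire up the enqueue and
 * dequeue callbacks, and select the FD-build routine matching the session
 * context type.
 */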
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        dpaa2_sec_session *sess;
        struct dpaa2_sec_raw_dp_ctx *dp_ctx;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
        }

        if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa2_sec_session *)get_sec_session_private_data(
                                session_ctx.sec_sess);
        else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa2_sec_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, cryptodev_driver_id);
        else
                return -ENOTSUP;

        raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
        raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
        raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
        raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
        raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
        raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

        if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
                sess->build_raw_dp_fd = build_raw_dp_chain_fd;
        else if (sess->ctxt_type == DPAA2_SEC_AEAD)
                sess->build_raw_dp_fd = build_raw_dp_aead_fd;
        else if (sess->ctxt_type == DPAA2_SEC_AUTH)
                sess->build_raw_dp_fd = build_raw_dp_auth_fd;
        else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
                sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
        else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
                sess->build_raw_dp_fd = build_raw_dp_proto_fd;
        else if (sess->ctxt_type == DPAA2_SEC_PDCP)
                sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
        else
                return -ENOTSUP;

        dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
        dp_ctx->session = sess;

        return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct dpaa2_sec_raw_dp_ctx);
}
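
/*
 * Usage sketch (application side) via the generic raw data-path API in
 * rte_cryptodev.h; error handling is omitted and names such as dev_id,
 * qp_id, sess_ctx and vec are illustrative:
 *
 *     size_t sz = sizeof(struct rte_crypto_raw_dp_ctx) +
 *                 rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *     struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, sz, 0);
 *
 *     rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *             RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *     rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *             &enq_status);
 *     rte_cryptodev_raw_dequeue_burst(ctx, NULL, nb_ops, post_dequeue_cb,
 *             out_user_data, 1, &n_success, &deq_status);
 */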