crypto/dpaa2_sec: support OOP with raw buffer API
drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

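/*
 * Per-queue-pair driver context stored in the opaque drv_ctx_data area of
 * struct rte_crypto_raw_dp_ctx. Only the session pointer is consumed by
 * the FD-build and enqueue/dequeue paths in this file.
 */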
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

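/*
 * Build a compound frame descriptor (FD) for a cipher+auth chained
 * operation. The output frame list points at dest_sgl when the caller
 * requests out-of-place (OOP) processing, otherwise at the source sgl.
 * The input frame list always carries IV + source data, plus the old
 * ICV when decrypting/verifying.
 */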
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;
	uint16_t auth_tail_len = ofs.ofs.auth.tail;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

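/*
 * Build a compound FD for an AEAD operation. AAD, when present
 * (auth_only_len != 0), is placed after the IV in the input frame list;
 * dest_sgl selects the out-of-place output buffers.
 */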
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		/* AAD (auth-only data) is supplied via the aad/auth_iv
		 * pointer
		 */
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

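/*
 * Build a compound FD for an auth-only (MAC generate or verify)
 * operation. The output FLE points at the digest buffer; for verify
 * (DIR_DEC) the received digest is appended to the input frame list so
 * SEC can compare against it.
 */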
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, lengths/offsets are in bits and must be
	 * byte aligned; convert them to bytes below.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
		sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		/* The auth IV is read relative to userdata, which is
		 * expected to point at the rte_crypto_op.
		 */
		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = sgl->vec[0].len - data_offset;
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

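/*
 * Build a compound FD for full protocol offload (IPsec or PDCP). The
 * whole source frame is handed to SEC, so the cipher/auth offsets are
 * unused; for PDCP a per-packet HFN override can be picked up from the
 * user data.
 */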
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		/* widen the last SGE to the buffer's tot_len, as the
		 * protocol output may be larger than the input
		 */
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, the per-packet HFN is stored at
	 * hfn_ovd_offset in the user data (the mbuf priv area after
	 * sym_op).
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

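/*
 * Build a compound FD for a cipher-only operation. SNOW3G/ZUC offsets
 * and lengths arrive in bits and are converted to bytes; the input
 * frame list carries the IV followed by the source data.
 */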
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, lengths/offsets are in bits and must be
	 * byte aligned; convert them to bytes below.
	 */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* first FLE entry used to store userdata and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		DPAA2_SET_FLE_OFFSET(sge, data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			DPAA2_SET_FLE_OFFSET(sge, 0);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	DPAA2_SET_FLE_OFFSET(sge, data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		DPAA2_SET_FLE_OFFSET(sge, 0);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

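/*
 * Enqueue a burst of raw symmetric crypto vectors. One FD is built per
 * vector via the session's build_raw_dp_fd hook and pushed to the queue
 * pair's TX frame queue, retrying on a busy QBMAN portal up to
 * DPAA2_MAX_TX_RETRY_COUNT times. Returns the number of FDs enqueued.
 */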
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

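/*
 * Single-operation enqueue is not implemented for this PMD;
 * applications are expected to use the burst interface above.
 */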
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

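/*
 * Recover the userdata pointer saved at FD-build time. The FD points at
 * the output FLE, so the FLE just before it is the one that stored the
 * userdata; the whole FLE allocation is freed here.
 */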
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (void *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

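/*
 * Dequeue a burst of completed operations from the queue pair's RX
 * frame queue. Note the get_dequeue_count callback is ignored here; the
 * pull size is capped at max_nb_to_dequeue (at most dpaa2_dqrr_size
 * frames). Each returned FD's FRC word selects per-op success or error.
 */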
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

	/* Receive frames for a given device and queue pair (VQ) */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive packets until the last dequeue entry for the PULL
	 * command issued above is found.
	 */
	while (!is_last) {
		/* Check if the previously issued command has completed.
		 * The SWP may also be shared with the Ethernet driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and
		 * set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		post_dequeue(user_data, num_rx, status);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

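/*
 * The single-operation dequeue and the enqueue_done/dequeue_done hooks
 * are stubs: the burst paths above submit FDs to hardware and consume
 * results immediately, so there is nothing to flush or acknowledge.
 */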
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	/* Single-operation dequeue is not implemented */
	return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

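/*
 * Populate the raw data-path context for a queue pair: hook up the
 * enqueue/dequeue entry points and select the FD-build routine that
 * matches the session's context type (cipher+auth chain, AEAD,
 * auth-only, cipher-only, or IPsec/PDCP protocol offload).
 */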
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
			session_ctx.crypto_sess, cryptodev_driver_id);
	else
		return -ENOTSUP;

	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;

	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}
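
/*
 * Usage sketch (illustrative only, not part of the driver): an
 * application reaches the entry points above through the generic raw
 * data-path API. Device/queue setup, session creation and the filling
 * of "vec", "user_data" and "post_dequeue_cb" are elided and assumed to
 * be owned by the application, as are "dev_id", "qp_id" and "sess".
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int enq_status, deq_status;
 *	uint32_t n_success;
 *
 *	ctx = rte_zmalloc(NULL,
 *			rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	(vec->dest_sgl selects out-of-place destination buffers; leave it
 *	unset for in-place processing.)
 *
 *	rte_cryptodev_raw_enqueue_burst(ctx, vec, ofs, user_data,
 *			&enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, vec->num,
 *			post_dequeue_cb, user_data, 1, &n_success,
 *			&deq_status);
 */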