crypto/dpaa2_sec: support AEAD with raw buffer API
drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <cryptodev_pmd.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

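/*
 * Per queue-pair driver context for the raw data-path API. It lives in the
 * drv_ctx_data area of struct rte_crypto_raw_dp_ctx and carries the session
 * pointer used by the build/enqueue/dequeue handlers below.
 */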
struct dpaa2_sec_raw_dp_ctx {
        dpaa2_sec_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct ctxt_priv *priv = sess->ctxt;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        int data_len = 0, auth_len = 0, cipher_len = 0;
        unsigned int i = 0;
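        /*
         * SEC expects the authenticate-only data (header and tail bytes that
         * are authenticated but not ciphered) packed into one 32-bit word:
         * the tail length in the upper 16 bits, the header length below.
         */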
        uint16_t auth_hdr_len = ofs.ofs.cipher.head -
                                ofs.ofs.auth.head;
        uint16_t auth_tail_len = ofs.ofs.auth.tail;
        uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
        int icv_len = sess->digest_length;
        uint8_t *old_icv;
        uint8_t *iv_ptr = iv->va;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        /* first FLE entry used to store userdata and session ctxt */
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

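        /*
         * Layout of the allocated FLE memory: entry 0 saves userdata and the
         * session private context, entry 1 is the output frame-list entry,
         * entry 2 the input frame-list entry, and the remaining entries hold
         * the scatter/gather extensions for both directions.
         */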
        /* Save the shared descriptor */
        flc = &priv->flc_desc[0].flc;

        /* Configure FD as a FRAME LIST */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        /* Configure Output FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_SG_EXT(op_fle);
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        if (auth_only_len)
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

        op_fle->length = (sess->dir == DIR_ENC) ?
                        (cipher_len + icv_len) :
                        cipher_len;

        /* Configure Output SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
        sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_ENC) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, digest->iova);
                sge->length = icv_len;
        }
        DPAA2_SET_FLE_FIN(sge);

        sge++;

        /* Configure Input FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_FIN(ip_fle);

        ip_fle->length = (sess->dir == DIR_ENC) ?
                        (auth_len + sess->iv.length) :
                        (auth_len + sess->iv.length + icv_len);

        /* Configure Input SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
        sge->length = sess->iv.length;

        sge++;
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
        sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_DEC) {
                sge++;
                old_icv = (uint8_t *)(sge + 1);
                memcpy(old_icv, digest->va, icv_len);
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
                sge->length = icv_len;
        }

        DPAA2_SET_FLE_FIN(sge);
        if (auth_only_len) {
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
                DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
        }
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct ctxt_priv *priv = sess->ctxt;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
        int icv_len = sess->digest_length;
        uint8_t *old_icv;
        uint8_t *iv_ptr = iv->va;
        unsigned int i = 0;
        int data_len = 0, aead_len = 0;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

        /* first FLE entry used to store userdata and session ctxt */
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        /* Save the shared descriptor */
        flc = &priv->flc_desc[0].flc;

        /* Configure FD as a FRAME LIST */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        /* Configure Output FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_SG_EXT(op_fle);
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        if (auth_only_len)
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

        op_fle->length = (sess->dir == DIR_ENC) ?
                        (aead_len + icv_len) :
                        aead_len;

        /* Configure Output SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
        sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_ENC) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, digest->iova);
                sge->length = icv_len;
        }
        DPAA2_SET_FLE_FIN(sge);

        sge++;

        /* Configure Input FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_FIN(ip_fle);
        ip_fle->length = (sess->dir == DIR_ENC) ?
                (aead_len + sess->iv.length + auth_only_len) :
                (aead_len + sess->iv.length + auth_only_len + icv_len);

        /* Configure Input SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
        sge->length = sess->iv.length;

        sge++;
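        /*
         * For AEAD, the additional authenticated data is carried in through
         * the auth_iv pointer here and is placed between the IV and the
         * payload in the input frame list.
         */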
        if (auth_only_len) {
                DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
                sge->length = auth_only_len;
                sge++;
        }

        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
        sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

        /* i/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }

        if (sess->dir == DIR_DEC) {
                sge++;
                old_icv = (uint8_t *)(sge + 1);
                memcpy(old_icv, digest->va, icv_len);
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
                sge->length = icv_len;
        }

        DPAA2_SET_FLE_FIN(sge);
        if (auth_only_len) {
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
                DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
        }
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(iv);
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        int total_len = 0, data_len = 0, data_offset;
        uint8_t *old_digest;
        struct ctxt_priv *priv = sess->ctxt;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        data_offset = ofs.ofs.auth.head;

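        /*
         * For SNOW 3G UIA2 and ZUC EIA3 the raw API expresses offset and
         * length in bits; SEC takes bytes, so reject values that are not
         * byte aligned and convert the rest.
         */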
        if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
                sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
                        return -ENOTSUP;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        /* first FLE entry used to store userdata and session ctxt */
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        flc = &priv->flc_desc[DESC_INITFINAL].flc;

        /* sg FD */
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);

        /* o/p fle */
        DPAA2_SET_FLE_ADDR(op_fle,
                        DPAA2_VADDR_TO_IOVA(digest->va));
        op_fle->length = sess->digest_length;

        /* i/p fle */
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        ip_fle->length = data_len;

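        /*
         * The session IV (SNOW 3G F9 / ZUC EIA3 only) is read from the
         * crypto op via the session IV offset, i.e. userdata is assumed to
         * point at the rte_crypto_op, and is prepended as the first input
         * SGE after conversion to the form the SEC descriptor expects.
         */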
        if (sess->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
                                                sess->iv.offset);

                if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sge->length = 12;
                } else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sge->length = 8;
                } else {
                        sge->length = sess->iv.length;
                }
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
                ip_fle->length += sge->length;
                sge++;
        }
        /* i/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);

        if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
                sge->length = data_len;
                data_len = 0;
        } else {
                sge->length = sgl->vec[0].len - data_offset;
                for (i = 1; i < sgl->num; i++) {
                        sge++;
                        DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                        DPAA2_SET_FLE_OFFSET(sge, 0);
                        sge->length = sgl->vec[i].len;
                }
        }
        if (sess->dir == DIR_DEC) {
                /* Digest verification case */
                sge++;
                old_digest = (uint8_t *)(sge + 1);
                rte_memcpy(old_digest, digest->va, sess->digest_length);
                DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
                sge->length = sess->digest_length;
                ip_fle->length += sess->digest_length;
        }
        DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_FIN(ip_fle);
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct ctxt_priv *priv = sess->ctxt;
        struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
        struct sec_flow_context *flc;
        uint32_t in_len = 0, out_len = 0, i;

        /* first FLE entry used to store userdata and session ctxt */
        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        /* Save the shared descriptor */
        flc = &priv->flc_desc[0].flc;
        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(op_fle);
        DPAA2_SET_FLE_IVP(ip_fle);

        /* Configure FD as a FRAME LIST */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        /* Configure Output FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_SG_EXT(op_fle);
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        /* Configure Output SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, 0);
        sge->length = sgl->vec[0].len;
        out_len += sge->length;
        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
                out_len += sge->length;
        }
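        /*
         * Extend the last output SGE to the buffer's total length so that
         * protocol encapsulation has room to grow the frame beyond the
         * input data length.
         */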
        sge->length = sgl->vec[i - 1].tot_len;
        out_len += sge->length;

        DPAA2_SET_FLE_FIN(sge);
        op_fle->length = out_len;

        sge++;

        /* Configure Input FLE with Scatter/Gather Entry */
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_SG_EXT(ip_fle);
        DPAA2_SET_FLE_FIN(ip_fle);

        /* Configure input SGE for Encap/Decap */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, 0);
        sge->length = sgl->vec[0].len;
        in_len += sge->length;
        /* i/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
                in_len += sge->length;
        }

        ip_fle->length = in_len;
        DPAA2_SET_FLE_FIN(sge);

        /* In case of PDCP, the per-packet HFN override is read from the
         * userdata area at the session's hfn_ovd_offset.
         */
        if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
                uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
                                sess->pdcp.hfn_ovd_offset);
                /* enable HFN override */
                DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
                DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
                DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
        }
        DPAA2_SET_FD_LEN(fd, ip_fle->length);

        return 0;
}

static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
                       struct rte_crypto_sgl *sgl,
                       struct rte_crypto_va_iova_ptr *iv,
                       struct rte_crypto_va_iova_ptr *digest,
                       struct rte_crypto_va_iova_ptr *auth_iv,
                       union rte_crypto_sym_ofs ofs,
                       void *userdata,
                       struct qbman_fd *fd)
{
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);

        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
        int total_len = 0, data_len = 0, data_offset;
        struct sec_flow_context *flc;
        struct ctxt_priv *priv = sess->ctxt;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        data_offset = ofs.ofs.cipher.head;

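        /*
         * As in the auth path, SNOW 3G UEA2 and ZUC EEA3 offsets/lengths
         * arrive in bits and must be byte aligned before conversion to the
         * byte counts SEC works with.
         */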
        if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return -ENOTSUP;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        fle = (struct qbman_fle *)rte_malloc(NULL,
                        FLE_SG_MEM_SIZE(2 * sgl->num),
                        RTE_CACHE_LINE_SIZE);
        if (unlikely(!fle)) {
                DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
                return -ENOMEM;
        }
        memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
        /* first FLE entry used to store userdata and session ctxt */
        DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
        DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

        op_fle = fle + 1;
        ip_fle = fle + 2;
        sge = fle + 3;

        flc = &priv->flc_desc[0].flc;

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n",
                data_offset,
                data_len,
                sess->iv.length);

        /* o/p fle */
        DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
        op_fle->length = data_len;
        DPAA2_SET_FLE_SG_EXT(op_fle);

        /* o/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);
        sge->length = sgl->vec[0].len - data_offset;

        /* o/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }
        DPAA2_SET_FLE_FIN(sge);

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
                flc, fle, fle->addr_hi, fle->addr_lo,
                fle->length);

        /* i/p fle */
        sge++;
        DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
        ip_fle->length = sess->iv.length + data_len;
        DPAA2_SET_FLE_SG_EXT(ip_fle);

        /* i/p IV */
        DPAA2_SET_FLE_ADDR(sge, iv->iova);
        DPAA2_SET_FLE_OFFSET(sge, 0);
        sge->length = sess->iv.length;

        sge++;

        /* i/p 1st seg */
        DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
        DPAA2_SET_FLE_OFFSET(sge, data_offset);
        sge->length = sgl->vec[0].len - data_offset;

        /* i/p segs */
        for (i = 1; i < sgl->num; i++) {
                sge++;
                DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
                DPAA2_SET_FLE_OFFSET(sge, 0);
                sge->length = sgl->vec[i].len;
        }
        DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_FIN(ip_fle);

        /* sg fd */
        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
        DPAA2_SET_FD_LEN(fd, ip_fle->length);
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

        DPAA2_SEC_DP_DEBUG(
                "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n",
                DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));

        return 0;
}

static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        uint32_t loop;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t frames_to_send, retry_count;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
        dpaa2_sec_session *sess =
                ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint32_t flags[MAX_TX_RING_SLOTS] = {0};

        if (unlikely(vec->num == 0))
                return 0;

        if (sess == NULL) {
                DPAA2_SEC_ERR("sessionless raw crypto not supported");
                return 0;
        }
        /* Prepare enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

        if (!DPAA2_PER_LCORE_DPIO) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_SEC_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        while (vec->num) {
                frames_to_send = (vec->num > dpaa2_eqcr_size) ?
                        dpaa2_eqcr_size : vec->num;

                for (loop = 0; loop < frames_to_send; loop++) {
                        /* Clear the unused FD fields before sending */
                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
                        ret = sess->build_raw_dp_fd(drv_ctx,
                                                    &vec->src_sgl[loop],
                                                    &vec->iv[loop],
                                                    &vec->digest[loop],
                                                    &vec->auth_iv[loop],
                                                    ofs,
                                                    user_data[loop],
                                                    &fd_arr[loop]);
                        if (ret) {
                                DPAA2_SEC_ERR("error: Improper packet contents"
                                              " for crypto operation");
                                goto skip_tx;
                        }
                        status[loop] = 1;
                }

                loop = 0;
                retry_count = 0;
                while (loop < frames_to_send) {
                        ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
                                                         &fd_arr[loop],
                                                         &flags[loop],
                                                         frames_to_send - loop);
                        if (unlikely(ret < 0)) {
                                retry_count++;
                                if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
                                        num_tx += loop;
                                        vec->num -= loop;
                                        goto skip_tx;
                                }
                        } else {
                                loop += ret;
                                retry_count = 0;
                        }
                }

                num_tx += loop;
                vec->num -= loop;
        }
skip_tx:
        dpaa2_qp->tx_vq.tx_pkts += num_tx;
        dpaa2_qp->tx_vq.err_pkts += vec->num;

        return num_tx;
}

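/*
 * Single-operation enqueue is not implemented; only the burst path above
 * is supported.
 */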
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data_vec,
        uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
        void *user_data)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(data_vec);
        RTE_SET_USED(n_data_vecs);
        RTE_SET_USED(ofs);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(aad_or_auth_iv);
        RTE_SET_USED(user_data);

        return 0;
}

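/*
 * Recover the caller's userdata from a completed FD: the first FLE of the
 * block allocated at build time holds the userdata pointer, and the whole
 * FLE block is freed here.
 */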
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
        struct qbman_fle *fle;
        void *userdata;

        fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

        DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
                           fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
        userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
        /* free the fle memory */
        rte_free((void *)(fle - 1));

        return userdata;
}

static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success, int *dequeue_status)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(get_dequeue_count);

        /* This function receives frames for a given device and VQ */
        struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_qp->rx_vq.fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        void *user_data;
        uint32_t nb_ops = max_nb_to_dequeue;

        if (!DPAA2_PER_LCORE_DPIO) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_SEC_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc,
                                      (nb_ops > dpaa2_dqrr_size) ?
                                      dpaa2_dqrr_size : nb_ops);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                                    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
                                    1);

        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        DPAA2_SEC_WARN(
                                "SEC VDQ command is not issued : QBMAN busy");
                        /* Portal was busy, try again */
                        continue;
                }
                break;
        }

        /* Receive the packets till Last Dequeue entry is found with
         * respect to the above issued PULL command.
         */
        while (!is_last) {
                /* Check if the previous issued command is completed.
                 * The same SWP is shared between the Ethernet driver
                 * and the SEC driver.
                 */
                while (!qbman_check_command_complete(dq_storage))
                        ;

                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                /* Check whether Last Pull command is Expired and
                 * setting Condition for Loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely(
                                (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
                                DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
                                continue;
                        }
                }

                fd = qbman_result_DQ_fd(dq_storage);
                user_data = sec_fd_to_userdata(fd);
                if (is_user_data_array)
                        out_user_data[num_rx] = user_data;
                else
                        out_user_data[0] = user_data;
                if (unlikely(fd->simple.frc)) {
                        /* TODO Parse SEC errors */
                        DPAA2_SEC_ERR("SEC returned Error - %x",
                                      fd->simple.frc);
                        status = RTE_CRYPTO_OP_STATUS_ERROR;
                } else {
                        status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                }
                post_dequeue(user_data, num_rx, status);

                num_rx++;
                dq_storage++;
        } /* End of Packet Rx loop */

        dpaa2_qp->rx_vq.rx_pkts += num_rx;
        *dequeue_status = 1;
        *n_success = num_rx;

        DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
        /* Return the total number of packets received to DPAA2 app */
        return num_rx;
}

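/*
 * Single-operation dequeue is not implemented; only the burst path above
 * is supported.
 */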
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
                enum rte_crypto_op_status *op_status)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(dequeue_status);
        RTE_SET_USED(op_status);

        return NULL;
}

static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

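/*
 * Fill the generic raw data-path context with this PMD's handlers and
 * select the FD-build routine matching the session context type, so the
 * enqueue burst can dispatch per packet without re-checking the session.
 */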
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        dpaa2_sec_session *sess;
        struct dpaa2_sec_raw_dp_ctx *dp_ctx;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
        }

        if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa2_sec_session *)get_sec_session_private_data(
                                session_ctx.sec_sess);
        else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa2_sec_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, cryptodev_driver_id);
        else
                return -ENOTSUP;

        raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
        raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
        raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
        raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
        raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
        raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

        if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
                sess->build_raw_dp_fd = build_raw_dp_chain_fd;
        else if (sess->ctxt_type == DPAA2_SEC_AEAD)
                sess->build_raw_dp_fd = build_raw_dp_aead_fd;
        else if (sess->ctxt_type == DPAA2_SEC_AUTH)
                sess->build_raw_dp_fd = build_raw_dp_auth_fd;
        else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
                sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
        else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
                sess->ctxt_type == DPAA2_SEC_PDCP)
                sess->build_raw_dp_fd = build_raw_dp_proto_fd;
        else
                return -ENOTSUP;

        dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
        dp_ctx->session = sess;

        return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct dpaa2_sec_raw_dp_ctx);
}
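
/*
 * Usage sketch (illustrative only, not part of the driver): how an
 * application might drive this PMD through the generic raw data-path API.
 * dev_id, qp_id, sess and the vec/ofs/user_data setup are assumptions made
 * for the example; post_dequeue_cb is a caller-supplied
 * rte_cryptodev_raw_post_dequeue_t.
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	int enq_status, deq_status;
 *	uint32_t n_success;
 *
 *	ctx = rte_zmalloc(NULL,
 *		rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *		RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *		&enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, vec.num, post_dequeue_cb,
 *		out_user_data, 1, &n_success, &deq_status);
 */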