/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2022 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

/* RTA header files */
#include <desc/algo.h>
#include <desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

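/*
 * Per-queue-pair driver context stored in rte_crypto_raw_dp_ctx's
 * drv_ctx_data. Only the session pointer is consulted by this file;
 * the ring indices and cached counters mirror the generic raw
 * data-path context layout and appear unused here.
 */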
struct dpaa_sec_raw_dp_ctx {
        dpaa_sec_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

static inline int
is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int
is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

static __rte_always_inline int
dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

static __rte_always_inline int
dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

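/*
 * Allocate a per-job context (descriptor plus SG table) from the
 * queue pair's context mempool and zero the SG entries the job will
 * use. Returns NULL when the pool is exhausted.
 */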
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx = NULL;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes (four entries), so it
         * is invoked once per four SG entries. Since a context is
         * allocated for each packet, dcbz_64() is cheaper than memset().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

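/*
 * Build a compound frame descriptor for an auth-only job: sg[0]
 * points at the digest output, sg[1] is an extension pointing to the
 * input chain (an optional IV for SNOW3G/ZUC, the data segments and,
 * on verify, a copy of the expected digest).
 */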
static struct dpaa_sec_job *
build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(dest_sgl);
        RTE_SET_USED(iv);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(fd);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset, total_len = 0;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        data_offset = ofs.ofs.auth.head;

        /* Support lengths in bits only for SNOW3G and ZUC */

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, digest->iova);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->offset = data_offset;

        if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = sgl->vec[0].len - data_offset;

                /* remaining i/p segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        if (data_len > (int)sgl->vec[i].len)
                                sg->length = sgl->vec[i].len;
                        else
                                sg->length = data_len;

                        data_len = data_len - sg->length;
                        if (data_len < 1)
                                break;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, digest->va,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

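/*
 * Build a compound FD for an AEAD (GCM) job. The output chain holds
 * the ciphertext destination (dest_sgl for out-of-place, else sgl)
 * plus the digest on encrypt; the input chain is IV, optional AAD,
 * the data segments and, on decrypt, a copy of the digest to verify.
 * The AAD length (auth_only_len) is signalled to SEC via fd->cmd.
 */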
static inline struct dpaa_sec_job *
build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint8_t extra_req_segs;
        uint8_t *IV_ptr = iv->va;
        int data_len = 0, aead_len = 0;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        extra_req_segs = 4;
        aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

        if (ses->auth_only_len)
                extra_req_segs++;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = aead_len + ses->digest_length;
        else
                out_sg->length = aead_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        if (dest_sgl) {
                /* 1st seg */
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
        } else {
                /* 1st seg */
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
        }

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, digest->iova);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + aead_len
                                                + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + aead_len
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg: auth only (AAD) data */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, auth_iv->iova);
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
        sg->offset = ofs.ofs.cipher.head;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, digest->va,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        if (ses->auth_only_len)
                fd->cmd = 0x80000000 | ses->auth_only_len;

        return cf;
}

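/*
 * Build a compound FD for a cipher+auth chain job. The cipher region
 * (ofs.ofs.cipher) must lie inside the auth region (ofs.ofs.auth);
 * the leading and trailing auth-only byte counts are packed into
 * fd->cmd so SEC knows which bytes are authenticated but not ciphered.
 */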
static inline struct dpaa_sec_job *
build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(auth_iv);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint8_t *IV_ptr = iv->va;
        unsigned int i;
        uint16_t auth_hdr_len = ofs.ofs.cipher.head -
                                ofs.ofs.auth.head;
        uint16_t auth_tail_len;
        uint32_t auth_only_len;
        int data_len = 0, auth_len = 0, cipher_len = 0;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        auth_tail_len = auth_len - cipher_len - auth_hdr_len;
        auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = cipher_len + ses->digest_length;
        else
                out_sg->length = cipher_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        if (dest_sgl) {
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
                sg->length -= ofs.ofs.cipher.tail;
        } else {
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
                sg->length -= ofs.ofs.cipher.tail;
        }

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, digest->iova);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + auth_len;
        else
                in_sg->length = ses->iv.length + auth_len
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg: auth data */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
        sg->offset = ofs.ofs.auth.head;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, digest->va,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        if (auth_only_len)
                fd->cmd = 0x80000000 | auth_only_len;

        return cf;
}

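/*
 * Build a compound FD for a cipher-only job: sg[0] is an extension
 * to the output chain (dest_sgl for out-of-place, else sgl), sg[1]
 * to the input chain (IV followed by the data segments).
 */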
static struct dpaa_sec_job *
build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(fd);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        unsigned int i;
        uint8_t *IV_ptr = iv->va;
        int data_len, total_len = 0, data_offset;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        data_offset = ofs.ofs.cipher.head;

        /* Support lengths in bits only for SNOW3G and ZUC */
        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        if (dest_sgl) {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - data_offset;
                sg->offset = data_offset;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
        } else {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - data_offset;
                sg->offset = data_offset;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

#ifdef RTE_LIB_SECURITY
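/*
 * Build a compound FD for protocol offload (IPsec/PDCP). The whole
 * input passes through; the last output entry is sized with tot_len
 * so SEC can grow the packet (e.g. tunnel headers, ICV). For PDCP
 * with HFN override, the per-packet HFN is read from the userdata
 * area and passed via fd->cmd.
 */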
static inline struct dpaa_sec_job *
build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint32_t in_len = 0, out_len = 0;
        unsigned int i;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
        if (!ctx)
                return NULL;
        cf = &ctx->job;
        ctx->userdata = (void *)userdata;
        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        if (dest_sgl) {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->offset = 0;
                sg->length = dest_sgl->vec[0].len;
                out_len += sg->length;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->offset = 0;
                        sg->length = dest_sgl->vec[i].len;
                        out_len += sg->length;
                }
                sg->length = dest_sgl->vec[i - 1].tot_len;
        } else {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->offset = 0;
                sg->length = sgl->vec[0].len;
                out_len += sg->length;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->offset = 0;
                        sg->length = sgl->vec[i].len;
                        out_len += sg->length;
                }
                sg->length = sgl->vec[i - 1].tot_len;
        }
        out_len += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        out_sg->length = out_len;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_len = sgl->vec[0].len;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

        /* 1st seg */
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len;
        sg->offset = 0;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
                sg->offset = 0;
                in_len += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        in_sg->length = in_len;
        cpu_to_hw_sg(in_sg);

        if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
                fd->cmd = 0x80000000 |
                        *((uint32_t *)((uint8_t *)userdata +
                        ses->pdcp.hfn_ovd_offset));
                DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
                        *((uint32_t *)((uint8_t *)userdata +
                        ses->pdcp.hfn_ovd_offset)),
                        ses->pdcp.hfn_ovd);
        }

        return cf;
}
#endif

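/*
 * Raw data-path enqueue: build one compound FD per element of vec
 * with the session's build_raw_dp_fd callback, then push them to the
 * session's SEC input queue in bursts of up to DPAA_SEC_BURST frames
 * via qman_enqueue_multi_fq().
 */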
static uint32_t
dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        /* Transmit the frames to the given device and queue pair */
        uint32_t loop;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
        uint16_t num_tx = 0;
        struct qm_fd fds[DPAA_SEC_BURST], *fd;
        uint32_t frames_to_send;
        struct dpaa_sec_job *cf;
        dpaa_sec_session *ses =
                        ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        uint32_t flags[DPAA_SEC_BURST] = {0};
        struct qman_fq *inq[DPAA_SEC_BURST];

        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                if (rte_dpaa_portal_init((void *)0)) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return 0;
                }
        }

        while (vec->num) {
                frames_to_send = (vec->num > DPAA_SEC_BURST) ?
                                DPAA_SEC_BURST : vec->num;
                for (loop = 0; loop < frames_to_send; loop++) {
                        if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
                                if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
                                        frames_to_send = loop;
                                        goto send_pkts;
                                }
                        } else if (unlikely(ses->qp[rte_lcore_id() %
                                                MAX_DPAA_CORES] != dpaa_qp)) {
                                DPAA_SEC_DP_ERR("Old:sess->qp = %p"
                                        " New qp = %p\n",
                                        ses->qp[rte_lcore_id() %
                                        MAX_DPAA_CORES], dpaa_qp);
                                frames_to_send = loop;
                                goto send_pkts;
                        }

                        /* Clear the unused FD fields before sending */
                        fd = &fds[loop];
                        memset(fd, 0, sizeof(struct qm_fd));
                        cf = ses->build_raw_dp_fd(drv_ctx,
                                                &vec->src_sgl[loop],
                                                &vec->dest_sgl[loop],
                                                &vec->iv[loop],
                                                &vec->digest[loop],
                                                &vec->auth_iv[loop],
                                                ofs,
                                                user_data[loop],
                                                fd);
                        if (!cf) {
                                DPAA_SEC_ERR("error: Improper packet contents"
                                        " for crypto operation");
                                goto skip_tx;
                        }
                        inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
                        qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
                        fd->_format1 = qm_fd_compound;
                        fd->length29 = 2 * sizeof(struct qm_sg_entry);

                        status[loop] = 1;
                }
send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                                        &flags[loop], frames_to_send - loop);
                }
                vec->num -= frames_to_send;
                num_tx += frames_to_send;
        }

skip_tx:
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += vec->num - num_tx;

        return num_tx;
}

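/*
 * Pull completed jobs from the queue pair's outbound FQ with a
 * volatile dequeue command, recover the op context embedding the SG
 * table, report per-op status through post_dequeue() and release the
 * context back to its mempool.
 */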
static int
dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
                uint8_t is_user_data_array,
                rte_cryptodev_raw_post_dequeue_t post_dequeue,
                int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;
        uint8_t is_success = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, ask for the exact
         * number and set the QM_VDQCR_EXACT flag. Without that flag
         * QMan may return up to two more buffers than requested, so
         * for larger requests we ask for two fewer instead.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* The SG table is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                if (is_user_data_array)
                        out_user_data[pkts] = ctx->userdata;
                else
                        out_user_data[0] = ctx->userdata;

                if (!ctx->fd_status) {
                        is_success = true;
                } else {
                        is_success = false;
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                }
                post_dequeue(ctx->op, pkts, is_success);
                pkts++;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

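/*
 * Raw data-path dequeue burst. get_dequeue_count is unused here: the
 * number of frames to pull is taken from max_nb_to_dequeue.
 */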
static __rte_always_inline uint32_t
dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success, int *dequeue_status)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(get_dequeue_count);
        uint16_t num_rx;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
        uint32_t nb_ops = max_nb_to_dequeue;

        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                if (rte_dpaa_portal_init((void *)0)) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return 0;
                }
        }

        num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
                        is_user_data_array, post_dequeue, nb_ops);

        dpaa_qp->rx_pkts += num_rx;
        *dequeue_status = 1;
        *n_success = num_rx;

        DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

        return num_rx;
}

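/* Single-op enqueue is a stub; only the burst path is implemented
 * by this PMD.
 */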
static __rte_always_inline int
dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data_vec,
        uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
        void *user_data)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(data_vec);
        RTE_SET_USED(n_data_vecs);
        RTE_SET_USED(ofs);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(aad_or_auth_iv);
        RTE_SET_USED(user_data);

        return 0;
}

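/* Single-op dequeue is likewise a stub; use the burst path. */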
static __rte_always_inline void *
dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
        enum rte_crypto_op_status *op_status)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(dequeue_status);
        RTE_SET_USED(op_status);

        return NULL;
}

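/*
 * Populate the raw DP context with this PMD's handlers and bind the
 * session's FD-builder callback according to the session context
 * type (cipher, auth, chain, AEAD, or protocol offload).
 */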
int
dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        dpaa_sec_session *sess;
        struct dpaa_sec_raw_dp_ctx *dp_ctx;

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
        }

        if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa_sec_session *)get_sec_session_private_data(
                                session_ctx.sec_sess);
        else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa_sec_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
        else
                return -ENOTSUP;
        raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
        raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
        raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
        raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
        raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
        raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;

        if (sess->ctxt == DPAA_SEC_CIPHER)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
        else if (sess->ctxt == DPAA_SEC_AUTH)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
        else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
        else if (sess->ctxt == DPAA_SEC_AEAD)
                sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
#ifdef RTE_LIB_SECURITY
        else if (sess->ctxt == DPAA_SEC_IPSEC ||
                        sess->ctxt == DPAA_SEC_PDCP)
                sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
#endif
        else
                return -ENOTSUP;
        dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
        dp_ctx->session = sess;

        return 0;
}

int
dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct dpaa_sec_raw_dp_ctx);
}
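
/*
 * Usage note (illustrative sketch, not part of this driver): an
 * application drives these handlers indirectly through the generic
 * raw data-path API in rte_cryptodev.h. Assuming a configured
 * device/queue pair and a symmetric session "sess", the flow looks
 * roughly like this:
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *
 *	ctx = rte_zmalloc(NULL,
 *		rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *		RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *		&enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, burst_sz,
 *		post_cb, out_user_data, 1, &n_success, &deq_status);
 *
 * The vec, user_data, post_cb and burst_sz names above are
 * hypothetical; see the rte_cryptodev raw DP API documentation for
 * the full contract.
 */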