/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

/* RTA header files */
#include <desc/algo.h>
#include <desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

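/*
 * Per queue-pair raw data-path context, stored in the driver-private area
 * (drv_ctx_data) of struct rte_crypto_raw_dp_ctx. It keeps the session
 * selected at configure time; the head/tail and cached counters are
 * ring-bookkeeping fields that are not referenced by the burst-only
 * implementation in this file.
 */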
struct dpaa_sec_raw_dp_ctx {
        dpaa_sec_session *session;
        uint32_t tail;
        uint32_t head;
        uint16_t cached_enqueue;
        uint16_t cached_dequeue;
};

static inline int
is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

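/*
 * Enqueue/dequeue completion hooks of the raw data-path API. The burst
 * functions below submit and consume frames immediately, so nothing is
 * cached and these callbacks are intentionally no-ops.
 */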
static __rte_always_inline int
dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

static __rte_always_inline int
dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(n);

        return 0;
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear the SG memory. There are 16 SG entries of 16 bytes each;
         * one call to dcbz_64() clears 64 bytes, so four calls clear all
         * the SG entries. This function is called for each packet, and
         * memset() is costlier than dcbz_64().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

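/*
 * Build a compound frame descriptor for an auth-only (hash) operation.
 * sg[0] is the output entry and points at the digest buffer; sg[1] is an
 * extension entry that chains the input scatter list starting at sg[2]:
 * an optional algorithm IV (SNOW3G F9 / ZUC EIA3), the data to be
 * authenticated and, for verification (decode), a copy of the expected
 * digest appended at the end.
 */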
static struct dpaa_sec_job *
build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(dest_sgl);
        RTE_SET_USED(iv);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(fd);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset, total_len = 0;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
        data_offset = ofs.ofs.auth.head;

        /* Support only length in bits for SNOW3G and ZUC */

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, digest->iova);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->offset = data_offset;

        if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = sgl->vec[0].len - data_offset;

                /* remaining i/p segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        if (data_len > (int)sgl->vec[i].len)
                                sg->length = sgl->vec[i].len;
                        else
                                sg->length = data_len;

                        data_len = data_len - sg->length;
                        if (data_len < 1)
                                break;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, digest->va,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

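/*
 * Build a compound FD for an AEAD (e.g. GCM) operation. The output chain
 * holds the cipher output segments (from dest_sgl for out-of-place, else
 * from sgl) plus the digest on encrypt; the input chain holds the IV, the
 * auth-only data when auth_only_len is set, the payload and, on decrypt,
 * a copy of the received digest. When auth-only data is present, its
 * length is passed to SEC through fd->cmd with bit 31 set.
 */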
static inline struct dpaa_sec_job *
build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint8_t extra_req_segs;
        uint8_t *IV_ptr = iv->va;
        int data_len = 0, aead_len = 0;
        unsigned int i;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        extra_req_segs = 4;
        aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

        if (ses->auth_only_len)
                extra_req_segs++;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = aead_len + ses->digest_length;
        else
                out_sg->length = aead_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        if (dest_sgl) {
                /* 1st seg */
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
        } else {
                /* 1st seg */
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
        }

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, digest->iova);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + aead_len
                                                + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + aead_len
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2 seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, auth_iv->iova);
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
        sg->offset = ofs.ofs.cipher.head;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, digest->va,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        if (ses->auth_only_len)
                fd->cmd = 0x80000000 | ses->auth_only_len;

        return cf;
}

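/*
 * Build a compound FD for a chained cipher + auth operation. The output
 * chain carries the cipher output (and the digest on encrypt); the input
 * chain carries the IV, the full authenticated region and, on decrypt, a
 * copy of the received digest. The auth-only header and trailer lengths
 * are packed into fd->cmd (tail length in the upper half-word, header
 * length in the lower) with bit 31 set when either is non-zero.
 */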
static inline struct dpaa_sec_job *
build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(auth_iv);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint8_t *IV_ptr = iv->va;
        unsigned int i;
        uint16_t auth_hdr_len = ofs.ofs.cipher.head -
                                ofs.ofs.auth.head;
        uint16_t auth_tail_len = ofs.ofs.auth.tail;
        uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
        int data_len = 0, auth_len = 0, cipher_len = 0;

        for (i = 0; i < sgl->num; i++)
                data_len += sgl->vec[i].len;

        cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = cipher_len + ses->digest_length;
        else
                out_sg->length = cipher_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        if (dest_sgl) {
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
        } else {
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
                sg->offset = ofs.ofs.cipher.head;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
        }

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, digest->iova);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + auth_len;
        else
                in_sg->length = ses->iv.length + auth_len
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2 seg */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
        sg->offset = ofs.ofs.auth.head;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, digest->va,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        if (auth_only_len)
                fd->cmd = 0x80000000 | auth_only_len;

        return cf;
}

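/*
 * Build a compound FD for a cipher-only operation. The output chain is
 * either dest_sgl (out-of-place) or sgl (in-place), trimmed by the cipher
 * offset; the input chain is the IV followed by the cipher region of sgl.
 */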
static struct dpaa_sec_job *
build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(fd);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        unsigned int i;
        uint8_t *IV_ptr = iv->va;
        int data_len, total_len = 0, data_offset;

        for (i = 0; i < sgl->num; i++)
                total_len += sgl->vec[i].len;

        data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
        data_offset = ofs.ofs.cipher.head;

        /* Support lengths in bits only for SNOW3G and ZUC */
        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->userdata = (void *)userdata;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        if (dest_sgl) {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->length = dest_sgl->vec[0].len - data_offset;
                sg->offset = data_offset;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->length = dest_sgl->vec[i].len;
                }
        } else {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->length = sgl->vec[0].len - data_offset;
                sg->offset = data_offset;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->length = sgl->vec[i].len;
                }
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

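/*
 * Build a compound FD for protocol offload (IPsec or PDCP) sessions. The
 * whole source and destination buffers are handed to SEC, since the
 * protocol descriptor adds or strips headers and trailers itself. For
 * PDCP sessions with per-packet HFN override enabled, the HFN is read
 * from the user data at hfn_ovd_offset and passed through fd->cmd.
 */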
#ifdef RTE_LIB_SECURITY
static inline struct dpaa_sec_job *
build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
                        struct rte_crypto_sgl *sgl,
                        struct rte_crypto_sgl *dest_sgl,
                        struct rte_crypto_va_iova_ptr *iv,
                        struct rte_crypto_va_iova_ptr *digest,
                        struct rte_crypto_va_iova_ptr *auth_iv,
                        union rte_crypto_sym_ofs ofs,
                        void *userdata,
                        struct qm_fd *fd)
{
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(auth_iv);
        RTE_SET_USED(ofs);

        dpaa_sec_session *ses =
                ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        uint32_t in_len = 0, out_len = 0;
        unsigned int i;

        if (sgl->num > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
        if (!ctx)
                return NULL;
        cf = &ctx->job;
        ctx->userdata = (void *)userdata;
        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        if (dest_sgl) {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
                sg->offset = 0;
                sg->length = dest_sgl->vec[0].len;
                out_len += sg->length;

                /* Successive segs */
                for (i = 1; i < dest_sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
                        sg->offset = 0;
                        sg->length = dest_sgl->vec[i].len;
                        out_len += sg->length;
                }
                sg->length = dest_sgl->vec[i - 1].tot_len;
        } else {
                /* 1st seg */
                sg = &cf->sg[2];
                qm_sg_entry_set64(sg, sgl->vec[0].iova);
                sg->offset = 0;
                sg->length = sgl->vec[0].len;
                out_len += sg->length;

                /* Successive segs */
                for (i = 1; i < sgl->num; i++) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, sgl->vec[i].iova);
                        sg->offset = 0;
                        sg->length = sgl->vec[i].len;
                        out_len += sg->length;
                }
                sg->length = sgl->vec[i - 1].tot_len;
        }
        out_len += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        out_sg->length = out_len;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_len = sgl->vec[0].len;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

        /* 1st seg */
        qm_sg_entry_set64(sg, sgl->vec[0].iova);
        sg->length = sgl->vec[0].len;
        sg->offset = 0;

        /* Successive segs */
        for (i = 1; i < sgl->num; i++) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, sgl->vec[i].iova);
                sg->length = sgl->vec[i].len;
                sg->offset = 0;
                in_len += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        in_sg->length = in_len;
        cpu_to_hw_sg(in_sg);

        if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
                fd->cmd = 0x80000000 |
                        *((uint32_t *)((uint8_t *)userdata +
                        ses->pdcp.hfn_ovd_offset));
                DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
                        *((uint32_t *)((uint8_t *)userdata +
                        ses->pdcp.hfn_ovd_offset)),
                        ses->pdcp.hfn_ovd);
        }

        return cf;
}
#endif

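/*
 * Raw data-path enqueue: for each element of the vector, build a compound
 * FD via the session's build_raw_dp_fd callback and enqueue it to the
 * session input FQ of the current lcore, in bursts of up to DPAA_SEC_BURST
 * frames. Returns the number of frames actually handed to QMan.
 */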
static uint32_t
dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
        void *user_data[], int *status)
{
        /* Function to transmit the frames to given device and queuepair */
        uint32_t loop;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
        uint16_t num_tx = 0;
        struct qm_fd fds[DPAA_SEC_BURST], *fd;
        uint32_t frames_to_send;
        struct dpaa_sec_job *cf;
        dpaa_sec_session *ses =
                        ((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
        uint32_t flags[DPAA_SEC_BURST] = {0};
        struct qman_fq *inq[DPAA_SEC_BURST];

        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                if (rte_dpaa_portal_init((void *)0)) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return 0;
                }
        }

        while (vec->num) {
                frames_to_send = (vec->num > DPAA_SEC_BURST) ?
                                DPAA_SEC_BURST : vec->num;
                for (loop = 0; loop < frames_to_send; loop++) {
                        if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
                                if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
                                        frames_to_send = loop;
                                        goto send_pkts;
                                }
                        } else if (unlikely(ses->qp[rte_lcore_id() %
                                                MAX_DPAA_CORES] != dpaa_qp)) {
                                DPAA_SEC_DP_ERR("Old:sess->qp = %p"
                                        " New qp = %p\n",
                                        ses->qp[rte_lcore_id() %
                                        MAX_DPAA_CORES], dpaa_qp);
                                frames_to_send = loop;
                                goto send_pkts;
                        }

                        /* Clear the unused FD fields before sending */
                        fd = &fds[loop];
                        memset(fd, 0, sizeof(struct qm_fd));
                        cf = ses->build_raw_dp_fd(drv_ctx,
                                                &vec->src_sgl[loop],
                                                &vec->dest_sgl[loop],
                                                &vec->iv[loop],
                                                &vec->digest[loop],
                                                &vec->auth_iv[loop],
                                                ofs,
                                                user_data[loop],
                                                fd);
                        if (!cf) {
                                DPAA_SEC_ERR("error: Improper packet contents"
                                        " for crypto operation");
                                goto skip_tx;
                        }
                        inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
                        qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
                        fd->_format1 = qm_fd_compound;
                        fd->length29 = 2 * sizeof(struct qm_sg_entry);

                        status[loop] = 1;
                }
send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                                        &flags[loop], frames_to_send - loop);
                }
                vec->num -= frames_to_send;
                num_tx += frames_to_send;
        }

skip_tx:
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += vec->num - num_tx;

        return num_tx;
}

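/*
 * Pull completed frames from the queue pair's SEC output FQ using a
 * volatile dequeue command (VDQCR), report per-packet status through the
 * post_dequeue callback and release each operation context back to its
 * mempool.
 */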
static int
dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
                uint8_t is_user_data_array,
                rte_cryptodev_raw_post_dequeue_t post_dequeue,
                int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;
        uint8_t is_success = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
         * flag so that exactly the requested number of buffers is provided.
         * Otherwise the flag is not set; without QM_VDQCR_EXACT the dequeue
         * may return up to two more buffers than requested, so we request
         * two less in that case.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx,
                 * sg[0] is for output
                 * sg[1] for input
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                if (is_user_data_array)
                        out_user_data[pkts] = ctx->userdata;
                else
                        out_user_data[0] = ctx->userdata;

                if (!ctx->fd_status) {
                        is_success = true;
                } else {
                        is_success = false;
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                }
                post_dequeue(ctx->op, pkts, is_success);
                pkts++;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}


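/*
 * Raw data-path dequeue burst. The get_dequeue_count callback is ignored;
 * the caller-provided max_nb_to_dequeue bounds how many frames are pulled
 * from the output FQ in one call.
 */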
static __rte_always_inline uint32_t
dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
        rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
        uint32_t max_nb_to_dequeue,
        rte_cryptodev_raw_post_dequeue_t post_dequeue,
        void **out_user_data, uint8_t is_user_data_array,
        uint32_t *n_success, int *dequeue_status)
{
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(get_dequeue_count);
        uint16_t num_rx;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
        uint32_t nb_ops = max_nb_to_dequeue;

        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                if (rte_dpaa_portal_init((void *)0)) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return 0;
                }
        }

        num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
                        is_user_data_array, post_dequeue, nb_ops);

        dpaa_qp->rx_pkts += num_rx;
        *dequeue_status = 1;
        *n_success = num_rx;

        DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

        return num_rx;
}

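/*
 * Single-operation enqueue/dequeue are not implemented by this PMD; only
 * the burst variants above do real work, so these entry points are stubs.
 */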
static __rte_always_inline int
dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
        struct rte_crypto_vec *data_vec,
        uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
        void *user_data)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(data_vec);
        RTE_SET_USED(n_data_vecs);
        RTE_SET_USED(ofs);
        RTE_SET_USED(iv);
        RTE_SET_USED(digest);
        RTE_SET_USED(aad_or_auth_iv);
        RTE_SET_USED(user_data);

        return 0;
}

static __rte_always_inline void *
dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
        enum rte_crypto_op_status *op_status)
{
        RTE_SET_USED(qp_data);
        RTE_SET_USED(drv_ctx);
        RTE_SET_USED(dequeue_status);
        RTE_SET_USED(op_status);

        return NULL;
}

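/*
 * Fill in the raw data-path context for a queue pair. An application is
 * expected to drive this through the generic cryptodev raw API; a minimal
 * sketch (illustrative only, assuming dev_id, qp_id, sess, vec, ofs and the
 * dequeue callbacks are set up elsewhere) looks like:
 *
 *	size_t len = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *ctx = rte_zmalloc(NULL, len, 0);
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data, &enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, burst_sz, post_deq,
 *			out_user_data, 1, &n_success, &deq_status);
 */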
int
dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
        enum rte_crypto_op_sess_type sess_type,
        union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
        dpaa_sec_session *sess;
        struct dpaa_sec_raw_dp_ctx *dp_ctx;
        RTE_SET_USED(qp_id);

        if (!is_update) {
                memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
                raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
        }

        if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                sess = (dpaa_sec_session *)get_sec_session_private_data(
                                session_ctx.sec_sess);
        else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                sess = (dpaa_sec_session *)get_sym_session_private_data(
                        session_ctx.crypto_sess, dpaa_cryptodev_driver_id);
        else
                return -ENOTSUP;
        raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
        raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
        raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
        raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
        raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
        raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;

        if (sess->ctxt == DPAA_SEC_CIPHER)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
        else if (sess->ctxt == DPAA_SEC_AUTH)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
        else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
                sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
        else if (sess->ctxt == DPAA_SEC_AEAD)
                sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
#ifdef RTE_LIB_SECURITY
        else if (sess->ctxt == DPAA_SEC_IPSEC ||
                        sess->ctxt == DPAA_SEC_PDCP)
                sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
#endif
        else
                return -ENOTSUP;
        dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
        dp_ctx->session = sess;

        return 0;
}

int
dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
        return sizeof(struct dpaa_sec_raw_dp_ctx);
}