b811f2f1bba81f34ada296335c99daba7219d315
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35
36 /* Required types */
37 typedef uint64_t        dma_addr_t;
38
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
43
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
/* PCI-style identification of the FSL/NXP SEC block */
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
/* Size of one pre-allocated FLE buffer (scratch FLE + op/ip FLEs + SGEs) */
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
/* S/G case: base buffer plus 32 bytes (one qbman_fle) per segment */
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
/* Data head-room adjustments for outbound/inbound protocol offload */
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

/* SEC hardware era assumed by the RTA descriptor library */
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

/* Driver id assigned by the cryptodev framework at registration */
static uint8_t cryptodev_driver_id;

/* Dynamic log type for this PMD */
int dpaa2_logtype_sec;
67
68 static inline int
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70                            struct rte_crypto_op *op,
71                            struct qbman_fd *fd, uint16_t bpid)
72 {
73         struct rte_crypto_sym_op *sym_op = op->sym;
74         struct ctxt_priv *priv = sess->ctxt;
75         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76         struct sec_flow_context *flc;
77         struct rte_mbuf *mbuf;
78         uint32_t in_len = 0, out_len = 0;
79
80         if (sym_op->m_dst)
81                 mbuf = sym_op->m_dst;
82         else
83                 mbuf = sym_op->m_src;
84
85         /* first FLE entry used to store mbuf and session ctxt */
86         fle = (struct qbman_fle *)rte_malloc(NULL,
87                         FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
88                         RTE_CACHE_LINE_SIZE);
89         if (unlikely(!fle)) {
90                 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
91                 return -1;
92         }
93         memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
94         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96
97         /* Save the shared descriptor */
98         flc = &priv->flc_desc[0].flc;
99
100         op_fle = fle + 1;
101         ip_fle = fle + 2;
102         sge = fle + 3;
103
104         if (likely(bpid < MAX_BPID)) {
105                 DPAA2_SET_FD_BPID(fd, bpid);
106                 DPAA2_SET_FLE_BPID(op_fle, bpid);
107                 DPAA2_SET_FLE_BPID(ip_fle, bpid);
108         } else {
109                 DPAA2_SET_FD_IVP(fd);
110                 DPAA2_SET_FLE_IVP(op_fle);
111                 DPAA2_SET_FLE_IVP(ip_fle);
112         }
113
114         /* Configure FD as a FRAME LIST */
115         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116         DPAA2_SET_FD_COMPOUND_FMT(fd);
117         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
118
119         /* Configure Output FLE with Scatter/Gather Entry */
120         DPAA2_SET_FLE_SG_EXT(op_fle);
121         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
122
123         /* Configure Output SGE for Encap/Decap */
124         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
126         /* o/p segs */
127         while (mbuf->next) {
128                 sge->length = mbuf->data_len;
129                 out_len += sge->length;
130                 sge++;
131                 mbuf = mbuf->next;
132                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
134         }
135         /* using buf_len for last buf - so that extra data can be added */
136         sge->length = mbuf->buf_len - mbuf->data_off;
137         out_len += sge->length;
138
139         DPAA2_SET_FLE_FIN(sge);
140         op_fle->length = out_len;
141
142         sge++;
143         mbuf = sym_op->m_src;
144
145         /* Configure Input FLE with Scatter/Gather Entry */
146         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147         DPAA2_SET_FLE_SG_EXT(ip_fle);
148         DPAA2_SET_FLE_FIN(ip_fle);
149
150         /* Configure input SGE for Encap/Decap */
151         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153         sge->length = mbuf->data_len;
154         in_len += sge->length;
155
156         mbuf = mbuf->next;
157         /* i/p segs */
158         while (mbuf) {
159                 sge++;
160                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162                 sge->length = mbuf->data_len;
163                 in_len += sge->length;
164                 mbuf = mbuf->next;
165         }
166         ip_fle->length = in_len;
167         DPAA2_SET_FLE_FIN(sge);
168
169         /* In case of PDCP, per packet HFN is stored in
170          * mbuf priv after sym_op.
171          */
172         if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173                 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
174                 /*enable HFN override override */
175                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
176                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
177                 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
178         }
179         DPAA2_SET_FD_LEN(fd, ip_fle->length);
180
181         return 0;
182 }
183
184 static inline int
185 build_proto_compound_fd(dpaa2_sec_session *sess,
186                struct rte_crypto_op *op,
187                struct qbman_fd *fd, uint16_t bpid)
188 {
189         struct rte_crypto_sym_op *sym_op = op->sym;
190         struct ctxt_priv *priv = sess->ctxt;
191         struct qbman_fle *fle, *ip_fle, *op_fle;
192         struct sec_flow_context *flc;
193         struct rte_mbuf *src_mbuf = sym_op->m_src;
194         struct rte_mbuf *dst_mbuf = sym_op->m_dst;
195         int retval;
196
197         if (!dst_mbuf)
198                 dst_mbuf = src_mbuf;
199
200         /* Save the shared descriptor */
201         flc = &priv->flc_desc[0].flc;
202
203         /* we are using the first FLE entry to store Mbuf */
204         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
205         if (retval) {
206                 DPAA2_SEC_DP_ERR("Memory alloc failed");
207                 return -1;
208         }
209         memset(fle, 0, FLE_POOL_BUF_SIZE);
210         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
211         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
212
213         op_fle = fle + 1;
214         ip_fle = fle + 2;
215
216         if (likely(bpid < MAX_BPID)) {
217                 DPAA2_SET_FD_BPID(fd, bpid);
218                 DPAA2_SET_FLE_BPID(op_fle, bpid);
219                 DPAA2_SET_FLE_BPID(ip_fle, bpid);
220         } else {
221                 DPAA2_SET_FD_IVP(fd);
222                 DPAA2_SET_FLE_IVP(op_fle);
223                 DPAA2_SET_FLE_IVP(ip_fle);
224         }
225
226         /* Configure FD as a FRAME LIST */
227         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
228         DPAA2_SET_FD_COMPOUND_FMT(fd);
229         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
230
231         /* Configure Output FLE with dst mbuf data  */
232         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
233         DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
234         DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
235
236         /* Configure Input FLE with src mbuf data */
237         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
238         DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
239         DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
240
241         DPAA2_SET_FD_LEN(fd, ip_fle->length);
242         DPAA2_SET_FLE_FIN(ip_fle);
243
244         /* In case of PDCP, per packet HFN is stored in
245          * mbuf priv after sym_op.
246          */
247         if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
248                 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
249                 /*enable HFN override override */
250                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
251                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
252                 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
253         }
254
255         return 0;
256
257 }
258
/*
 * Build a simple (non-compound) frame descriptor for a protocol offload
 * operation. Falls back to the compound-FD path when the operation is
 * out-of-place (separate destination mbuf).
 *
 * Returns 0 on success, or the result of build_proto_compound_fd() for
 * the out-of-place case.
 */
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	/* Out-of-place needs separate input/output FLEs - use compound FD */
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* The FD points straight at the source mbuf data (in-place op) */
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf: the op pointer is stashed in
	 * mbuf->buf_iova so it can be recovered on dequeue, and the real
	 * buf_iova is parked in aead.digest.phys_addr meanwhile -
	 * presumably restored by the dequeue path (not visible here).
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
291
/*
 * Build a compound FD for an AEAD (GCM) operation over scatter-gather
 * mbuf chains.
 *
 * FLE table layout (rte_malloc'd, one qbman_fle per chain segment):
 *   fle[0]  - scratch entry holding the op pointer and session ctxt.
 *   fle[1]  - output frame-list entry (op_fle), S/G extended.
 *   fle[2]  - input frame-list entry (ip_fle), S/G extended.
 *   fle[3+] - output SGEs (data [+ digest on encrypt]), then input SGEs
 *             (IV [+ AAD] + data [+ received digest on decrypt]).
 *
 * On decrypt the received ICV is copied into scratch space just past the
 * last input SGE so SEC can compare against it in a stable buffer.
 * Returns 0 on success, -1 when FLE memory allocation fails.
 */
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place when a distinct destination mbuf is supplied */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Encrypt output carries the generated ICV; decrypt does not */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap; offset is padded so the
	 * AAD region ahead of the data stays 16-byte aligned.
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Last output segment excludes the ICV area */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		/* Generated digest goes to the caller-provided buffer */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Decrypt input additionally covers the received ICV */
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		/* AAD precedes the cipher data */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch space just past the
		 * last SGE so SEC reads it from a stable location.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
439
/*
 * Build a compound FD for an AEAD (GCM) operation on single-segment mbufs.
 *
 * A fixed-size buffer from the per-session fle_pool holds:
 *   fle[0]   - scratch entry storing the op pointer and session ctxt.
 *   fle[1]   - output frame-list entry, fle[2] - input frame-list entry.
 *   sge[0..3]- output SGEs (data [+ digest]) and input SGEs
 *              (IV [+ AAD] + data [+ received digest]).
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place when a distinct destination mbuf is supplied */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* fle now points at the output FLE; sge at the first of 4 SGEs */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* No backing buffer pool: mark FD/FLEs as invalid-pool */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output carries the generated ICV; decrypt does not */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap; offset is padded so the
	 * AAD region ahead of the data stays 16-byte aligned.
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		/* Generated digest goes to the caller-provided buffer */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally covers the received ICV */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		/* AAD precedes the cipher data */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch space just past the
		 * last SGE so SEC reads it from a stable location.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
587
/*
 * Build a compound FD for a chained cipher+auth (authenc) operation over
 * scatter-gather mbuf chains.
 *
 * auth_only_len is the authenticated-but-not-encrypted prefix, derived
 * from the difference between the auth and cipher data lengths.
 *
 * FLE table layout (rte_malloc'd, one qbman_fle per chain segment):
 *   fle[0]  - scratch entry holding the op pointer and session ctxt.
 *   fle[1]  - output frame-list entry (op_fle), S/G extended.
 *   fle[2]  - input frame-list entry (ip_fle), S/G extended.
 *   fle[3+] - output SGEs, then input SGEs (IV + data [+ received ICV]).
 *
 * Returns 0 on success, -1 when FLE memory allocation fails.
 */
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	/* Auth-only prefix: region authenticated but not ciphered */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place when a distinct destination mbuf is supplied */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Encrypt output carries the generated ICV; decrypt does not */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Last output segment excludes the ICV area */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		/* Generated digest goes to the caller-provided buffer */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Decrypt input additionally covers the received ICV */
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Last input segment excludes the ICV area */
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch space just past the
		 * last SGE so SEC reads it from a stable location.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
733
734 static inline int
735 build_authenc_fd(dpaa2_sec_session *sess,
736                  struct rte_crypto_op *op,
737                  struct qbman_fd *fd, uint16_t bpid)
738 {
739         struct rte_crypto_sym_op *sym_op = op->sym;
740         struct ctxt_priv *priv = sess->ctxt;
741         struct qbman_fle *fle, *sge;
742         struct sec_flow_context *flc;
743         uint32_t auth_only_len = sym_op->auth.data.length -
744                                 sym_op->cipher.data.length;
745         int icv_len = sess->digest_length, retval;
746         uint8_t *old_icv;
747         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
748                         sess->iv.offset);
749         struct rte_mbuf *dst;
750
751         PMD_INIT_FUNC_TRACE();
752
753         if (sym_op->m_dst)
754                 dst = sym_op->m_dst;
755         else
756                 dst = sym_op->m_src;
757
758         /* we are using the first FLE entry to store Mbuf.
759          * Currently we donot know which FLE has the mbuf stored.
760          * So while retreiving we can go back 1 FLE from the FD -ADDR
761          * to get the MBUF Addr from the previous FLE.
762          * We can have a better approach to use the inline Mbuf
763          */
764         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
765         if (retval) {
766                 DPAA2_SEC_ERR("Memory alloc failed for SGE");
767                 return -1;
768         }
769         memset(fle, 0, FLE_POOL_BUF_SIZE);
770         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
771         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
772         fle = fle + 1;
773         sge = fle + 2;
774         if (likely(bpid < MAX_BPID)) {
775                 DPAA2_SET_FD_BPID(fd, bpid);
776                 DPAA2_SET_FLE_BPID(fle, bpid);
777                 DPAA2_SET_FLE_BPID(fle + 1, bpid);
778                 DPAA2_SET_FLE_BPID(sge, bpid);
779                 DPAA2_SET_FLE_BPID(sge + 1, bpid);
780                 DPAA2_SET_FLE_BPID(sge + 2, bpid);
781                 DPAA2_SET_FLE_BPID(sge + 3, bpid);
782         } else {
783                 DPAA2_SET_FD_IVP(fd);
784                 DPAA2_SET_FLE_IVP(fle);
785                 DPAA2_SET_FLE_IVP((fle + 1));
786                 DPAA2_SET_FLE_IVP(sge);
787                 DPAA2_SET_FLE_IVP((sge + 1));
788                 DPAA2_SET_FLE_IVP((sge + 2));
789                 DPAA2_SET_FLE_IVP((sge + 3));
790         }
791
792         /* Save the shared descriptor */
793         flc = &priv->flc_desc[0].flc;
794         /* Configure FD as a FRAME LIST */
795         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
796         DPAA2_SET_FD_COMPOUND_FMT(fd);
797         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
798
799         DPAA2_SEC_DP_DEBUG(
800                 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
801                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
802                 sym_op->auth.data.offset,
803                 sym_op->auth.data.length,
804                 sess->digest_length,
805                 sym_op->cipher.data.offset,
806                 sym_op->cipher.data.length,
807                 sess->iv.length,
808                 sym_op->m_src->data_off);
809
810         /* Configure Output FLE with Scatter/Gather Entry */
811         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
812         if (auth_only_len)
813                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
814         fle->length = (sess->dir == DIR_ENC) ?
815                         (sym_op->cipher.data.length + icv_len) :
816                         sym_op->cipher.data.length;
817
818         DPAA2_SET_FLE_SG_EXT(fle);
819
820         /* Configure Output SGE for Encap/Decap */
821         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
822         DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
823                                 dst->data_off);
824         sge->length = sym_op->cipher.data.length;
825
826         if (sess->dir == DIR_ENC) {
827                 sge++;
828                 DPAA2_SET_FLE_ADDR(sge,
829                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
830                 sge->length = sess->digest_length;
831                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
832                                         sess->iv.length));
833         }
834         DPAA2_SET_FLE_FIN(sge);
835
836         sge++;
837         fle++;
838
839         /* Configure Input FLE with Scatter/Gather Entry */
840         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
841         DPAA2_SET_FLE_SG_EXT(fle);
842         DPAA2_SET_FLE_FIN(fle);
843         fle->length = (sess->dir == DIR_ENC) ?
844                         (sym_op->auth.data.length + sess->iv.length) :
845                         (sym_op->auth.data.length + sess->iv.length +
846                          sess->digest_length);
847
848         /* Configure Input SGE for Encap/Decap */
849         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
850         sge->length = sess->iv.length;
851         sge++;
852
853         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
854         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
855                                 sym_op->m_src->data_off);
856         sge->length = sym_op->auth.data.length;
857         if (sess->dir == DIR_DEC) {
858                 sge++;
859                 old_icv = (uint8_t *)(sge + 1);
860                 memcpy(old_icv, sym_op->auth.digest.data,
861                        sess->digest_length);
862                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
863                 sge->length = sess->digest_length;
864                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
865                                  sess->digest_length +
866                                  sess->iv.length));
867         }
868         DPAA2_SET_FLE_FIN(sge);
869         if (auth_only_len) {
870                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
871                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
872         }
873         return 0;
874 }
875
/* Build a compound frame descriptor (FD) for an authentication-only op
 * whose source mbuf is scatter-gathered (multi-segment).
 *
 * Frame-list layout in the rte_malloc'd area (freed on dequeue):
 *   fle[0] - bookkeeping only: stores the crypto op pointer and session
 *            ctxt so they can be recovered when the FD comes back from SEC
 *   fle[1] - output FLE: the digest buffer
 *   fle[2] - input FLE: SG list of { optional IV, data segments, and for
 *            DIR_DEC a copy of the received digest for HW verification }
 *
 * Returns 0 on success, -1 on bad bit-alignment or allocation failure.
 */
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* SNOW3G f9 / ZUC EIA3 express length and offset in bits; only
	 * byte-aligned values are supported, so reject the rest and
	 * convert bits to bytes.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	/* Sized for the fixed FLEs plus one SGE per source segment */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* Configure the FD as a compound frame pointing at the FLE pair */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle: SEC writes the computed digest here */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle: extension bit set, points at the SG list that follows */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		/* SNOW f9 / ZUC EIA need the IV repacked into the layout
		 * the SEC descriptor expects (12 / 8 bytes respectively)
		 */
		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		/* Whole auth region fits in the first segment */
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs: consume data_len across the chain */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification: stash a copy of the received digest
		 * just past the SG list and append it as the last input
		 * entry so the hardware can compare against it.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	/* Terminate the SG list and the frame list; FD length = input len */
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
996
/* Build a compound FD for an authentication-only op on a contiguous mbuf,
 * using an FLE block taken from the session's fle_pool (returned to the
 * pool on dequeue).
 *
 * Layout: the first FLE stores the op pointer and session ctxt for
 * recovery on dequeue; fle+1 is the output FLE (digest buffer) and fle+2
 * the input FLE pointing at a small SG list of { optional IV, data, and
 * for DIR_DEC a copy of the received digest }.
 *
 * Returns 0 on success, -1 on bad bit-alignment or pool exhaustion.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* SNOW3G f9 / ZUC EIA3 express length/offset in bits; only
	 * byte-aligned values are supported.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store the mbuf/op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the mbuf addr from the previous FLE.
	 * We could have a better approach using the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Mark every FLE/SGE with the buffer-pool id, or flag the FD as
	 * "invalid pool" (software-managed buffers) when there is none.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	/* Compound FD pointing at the FLE pair + flow context */
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* Output FLE: SEC writes the computed digest here */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE (SG-extended, points at the SGE list) */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		/* SNOW f9 / ZUC EIA need the IV repacked into the layout
		 * the SEC descriptor expects (12 / 8 bytes respectively)
		 */
		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		/* Digest verification: copy the received digest past the
		 * SGE list and feed it to SEC as the last input entry.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	/* Terminate SG list and frame list; FD length = total input len */
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}
1112
/* Build a compound FD for a cipher-only op where source and/or destination
 * mbufs are scatter-gathered (multi-segment).
 *
 * Frame-list layout in the rte_malloc'd area (sized for both the output
 * and input segment lists; freed on dequeue):
 *   fle[0] - bookkeeping only: op pointer + session ctxt for recovery
 *   fle[1] - output FLE -> SG list over the destination segments
 *   fle[2] - input  FLE -> SG list of { IV, source segments }
 *
 * Returns 0 on success, -1 on bad bit-alignment or allocation failure.
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	/* SNOW3G UEA2 / ZUC EEA3 express length/offset in bits; only
	 * byte-aligned values are supported.
	 */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* Out-of-place when m_dst is supplied, else in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle: SG-extended, covers data_len bytes of ciphertext/plain */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs: one SGE per remaining destination segment */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: input = IV followed by the source data segments */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd: compound format, FD length = input FLE length */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1247
/* Build a compound FD for a cipher-only op on contiguous mbufs, using an
 * FLE block from the session's fle_pool (returned to the pool on dequeue).
 *
 * Layout: the first FLE stores the op pointer and session ctxt for
 * recovery; fle+1 is the output FLE (destination data area) and fle+2 the
 * input FLE pointing at a two-entry SG list of { IV, source data }.
 *
 * Returns 0 on success, -1 on bad bit-alignment or pool exhaustion.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	/* SNOW3G UEA2 / ZUC EEA3 express length/offset in bits; only
	 * byte-aligned values are supported.
	 */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -1;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* Out-of-place when m_dst is supplied, else in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store the mbuf/op.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the mbuf addr from the previous FLE.
	 * We could have a better approach using the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag every FLE/SGE with the buffer-pool id, or mark the FD as
	 * "invalid pool" (software-managed buffers) when there is none.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	/* Compound FD: address of the FLE pair, total input length, and
	 * the flow context holding the shared descriptor.
	 */
	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: destination data area */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: SG-extended, { IV, source data } */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	/* Terminate the SG list and the frame list */
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1366
1367 static inline int
1368 build_sec_fd(struct rte_crypto_op *op,
1369              struct qbman_fd *fd, uint16_t bpid)
1370 {
1371         int ret = -1;
1372         dpaa2_sec_session *sess;
1373
1374         PMD_INIT_FUNC_TRACE();
1375
1376         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1377                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1378                                 op->sym->session, cryptodev_driver_id);
1379         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1380                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1381                                 op->sym->sec_session);
1382         else
1383                 return -1;
1384
1385         /* Any of the buffer is segmented*/
1386         if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1387                   ((op->sym->m_dst != NULL) &&
1388                    !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1389                 switch (sess->ctxt_type) {
1390                 case DPAA2_SEC_CIPHER:
1391                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1392                         break;
1393                 case DPAA2_SEC_AUTH:
1394                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1395                         break;
1396                 case DPAA2_SEC_AEAD:
1397                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1398                         break;
1399                 case DPAA2_SEC_CIPHER_HASH:
1400                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1401                         break;
1402                 case DPAA2_SEC_IPSEC:
1403                 case DPAA2_SEC_PDCP:
1404                         ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1405                         break;
1406                 case DPAA2_SEC_HASH_CIPHER:
1407                 default:
1408                         DPAA2_SEC_ERR("error: Unsupported session");
1409                 }
1410         } else {
1411                 switch (sess->ctxt_type) {
1412                 case DPAA2_SEC_CIPHER:
1413                         ret = build_cipher_fd(sess, op, fd, bpid);
1414                         break;
1415                 case DPAA2_SEC_AUTH:
1416                         ret = build_auth_fd(sess, op, fd, bpid);
1417                         break;
1418                 case DPAA2_SEC_AEAD:
1419                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1420                         break;
1421                 case DPAA2_SEC_CIPHER_HASH:
1422                         ret = build_authenc_fd(sess, op, fd, bpid);
1423                         break;
1424                 case DPAA2_SEC_IPSEC:
1425                         ret = build_proto_fd(sess, op, fd, bpid);
1426                         break;
1427                 case DPAA2_SEC_PDCP:
1428                         ret = build_proto_compound_fd(sess, op, fd, bpid);
1429                         break;
1430                 case DPAA2_SEC_HASH_CIPHER:
1431                 default:
1432                         DPAA2_SEC_ERR("error: Unsupported session");
1433                 }
1434         }
1435         return ret;
1436 }
1437
/* Burst-enqueue up to @nb_ops crypto ops on the queue pair's TX frame
 * queue. One hardware FD is built per op (build_sec_fd) and pushed to
 * QBMAN in chunks of at most the EQCR ring size. Returns the number of
 * ops actually enqueued; any remainder is accounted in err_pkts.
 */
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/*todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Only session-based ops are supported; only ops[0] is checked,
	 * so the whole burst is assumed to be of the same kind.
	 */
	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	/* Lazily affine a QBMAN software portal to this lcore */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* A non-zero seqn means the mbuf holds a DQRR entry
			 * (presumably from an atomic event dequeue — the
			 * event path is outside this chunk); piggy-back a
			 * DCA on this enqueue to release that entry.
			 */
			if ((*ops)->sym->m_src->seqn) {
			 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

			 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
			 DPAA2_PER_LCORE_DQRR_SIZE--;
			 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				/* NOTE(review): this also abandons FDs
				 * already built in this batch (indices
				 * < loop) without enqueueing them, and
				 * their FLE memory is not reclaimed here —
				 * confirm whether that is intentional.
				 */
				goto skip_tx;
			}
			ops++;
		}
		/* QBMAN may accept fewer FDs than requested; loop until
		 * the whole chunk has been consumed.
		 */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							&flags[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
1520
/* Recover the crypto op from a simple (non-compound) FD returned by SEC.
 * Used for ops that were enqueued as single-buffer frames; the compound
 * path is handled by sec_fd_to_mbuf().
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	/* The FD points at the buffer; step back over the pool metadata to
	 * reach the inline mbuf header.
	 */
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* SEC may have grown/shrunk the frame (e.g. added/stripped
	 * protocol headers); bring the mbuf lengths in line with the FD.
	 */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Undo the enqueue-side pointer swap: the op pointer was parked in
	 * mbuf->buf_iova and the real buffer IOVA in
	 * aead.digest.phys_addr. NOTE(review): the matching enqueue-side
	 * builder is outside this chunk — confirm it performs this swap.
	 */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	/* Adjust data_off by the SEC data head-room delta: outbound
	 * processing prepends headers (negative DHR), inbound does not.
	 */
	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
1549
/* Convert an FLE-format FD returned by SEC into the originating
 * rte_crypto_op.
 *
 * The op pointer was stored one FLE entry before the FD address at
 * enqueue time; this retrieves it, fixes up mbuf lengths for protocol
 * (IPSEC/PDCP) security sessions, and releases the per-op FLE memory
 * back to its pool (or rte_free for scattered mbufs).
 * Returns NULL if the FD references a non-inline (IVP) buffer.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	/* Single-buffer frames take the simpler inline-mbuf path. */
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* The enqueue path stores the op pointer in the FLE entry just
	 * before the one the FD points at, so step back one FLE from the
	 * FD address to recover it.  A cleaner scheme (e.g. inline mbuf)
	 * could avoid this back-pointer convention.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch source (and destination, if distinct) mbufs. */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* For protocol-offload sessions the output length is only known
	 * from the FD: set pkt_len on the head and put the remainder of
	 * the length into the last segment's data_len.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
				sess->ctxt_type == DPAA2_SEC_PDCP) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			while (dst->next != NULL) {
				len -= dst->data_len;
				dst = dst->next;
			}
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* Release the FLE memory: contiguous mbufs used the FLE mempool,
	 * scattered mbufs allocated it with rte_malloc (see enqueue side).
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1624
/* Dequeue up to nb_ops completed crypto operations from the queue pair's
 * RX frame queue using a QBMAN volatile (pull-mode) dequeue.
 *
 * @param qp      queue pair (struct dpaa2_sec_qp *)
 * @param ops     output array for completed ops
 * @param nb_ops  capacity of @ops (capped at the DQRR ring size per pull)
 * @return number of ops written to @ops
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	/* Lazily affine a QBMAN software portal to this lcore. */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command, retrying while the portal is
	 * busy (the SWP may be shared with other drivers on this lcore).
	 */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		/* NOTE(review): sec_fd_to_mbuf() can return NULL for a
		 * non-inline (IVP) buffer; the status assignment below
		 * would then dereference NULL — confirm whether IVP frames
		 * can ever reach this queue.
		 */
		ops[num_rx] = sec_fd_to_mbuf(fd);

		/* A non-zero frame context (FRC) carries the SEC error
		 * status word for this frame.
		 */
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1721
1722 /** Release queue pair */
1723 static int
1724 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1725 {
1726         struct dpaa2_sec_qp *qp =
1727                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1728
1729         PMD_INIT_FUNC_TRACE();
1730
1731         if (qp->rx_vq.q_storage) {
1732                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1733                 rte_free(qp->rx_vq.q_storage);
1734         }
1735         rte_free(qp);
1736
1737         dev->data->queue_pairs[queue_pair_id] = NULL;
1738
1739         return 0;
1740 }
1741
1742 /** Setup a queue pair */
1743 static int
1744 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1745                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1746                 __rte_unused int socket_id)
1747 {
1748         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1749         struct dpaa2_sec_qp *qp;
1750         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1751         struct dpseci_rx_queue_cfg cfg;
1752         int32_t retcode;
1753
1754         PMD_INIT_FUNC_TRACE();
1755
1756         /* If qp is already in use free ring memory and qp metadata. */
1757         if (dev->data->queue_pairs[qp_id] != NULL) {
1758                 DPAA2_SEC_INFO("QP already setup");
1759                 return 0;
1760         }
1761
1762         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1763                     dev, qp_id, qp_conf);
1764
1765         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1766
1767         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1768                         RTE_CACHE_LINE_SIZE);
1769         if (!qp) {
1770                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1771                 return -1;
1772         }
1773
1774         qp->rx_vq.crypto_data = dev->data;
1775         qp->tx_vq.crypto_data = dev->data;
1776         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1777                 sizeof(struct queue_storage_info_t),
1778                 RTE_CACHE_LINE_SIZE);
1779         if (!qp->rx_vq.q_storage) {
1780                 DPAA2_SEC_ERR("malloc failed for q_storage");
1781                 return -1;
1782         }
1783         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1784
1785         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1786                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1787                 return -1;
1788         }
1789
1790         dev->data->queue_pairs[qp_id] = qp;
1791
1792         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1793         cfg.user_ctx = (size_t)(&qp->rx_vq);
1794         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1795                                       qp_id, &cfg);
1796         return retcode;
1797 }
1798
1799 /** Return the number of allocated queue pairs */
1800 static uint32_t
1801 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1802 {
1803         PMD_INIT_FUNC_TRACE();
1804
1805         return dev->data->nb_queue_pairs;
1806 }
1807
/** Return the size of the dpaa2_sec session private data structure.
 * (The previous comment said "aesni gcm" — copy/paste from another PMD.)
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
1816
/* Initialize a cipher-only session: copy the key into session storage,
 * build the SEC shared descriptor for the requested algorithm/mode, and
 * record its length in the flow context (FLC).
 *
 * @param dev      crypto device (supplies the FLE mempool)
 * @param xform    cipher transform (algo, key, IV, op direction)
 * @param session  session to populate; session->ctxt owns the FLC memory
 * @return 0 on success, -1 on allocation or descriptor-build failure
 *         (all allocations are freed on error)
 */
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	/* Keep a private copy of the key; the xform may not outlive us. */
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;	/* key embedded in descriptor */

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Build the shared descriptor into flc_desc[0].desc; bufsize is
	 * the descriptor length in words, or negative on failure.
	 */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
						SHR_NEVER, &cipherdata, NULL,
						session->iv.length,
						session->dir);
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
					      &cipherdata,
					      session->dir);
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
					      &cipherdata,
					      session->dir);
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			xform->cipher.algo);
		goto error_out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}

	/* Record the shared descriptor length in the flow context. */
	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
1950
1951 static int
1952 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1953                     struct rte_crypto_sym_xform *xform,
1954                     dpaa2_sec_session *session)
1955 {
1956         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1957         struct alginfo authdata;
1958         int bufsize, i;
1959         struct ctxt_priv *priv;
1960         struct sec_flow_context *flc;
1961
1962         PMD_INIT_FUNC_TRACE();
1963
1964         /* For SEC AUTH three descriptors are required for various stages */
1965         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1966                         sizeof(struct ctxt_priv) + 3 *
1967                         sizeof(struct sec_flc_desc),
1968                         RTE_CACHE_LINE_SIZE);
1969         if (priv == NULL) {
1970                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1971                 return -1;
1972         }
1973
1974         priv->fle_pool = dev_priv->fle_pool;
1975         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1976
1977         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1978                         RTE_CACHE_LINE_SIZE);
1979         if (session->auth_key.data == NULL) {
1980                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1981                 rte_free(priv);
1982                 return -1;
1983         }
1984         session->auth_key.length = xform->auth.key.length;
1985
1986         memcpy(session->auth_key.data, xform->auth.key.data,
1987                xform->auth.key.length);
1988         authdata.key = (size_t)session->auth_key.data;
1989         authdata.keylen = session->auth_key.length;
1990         authdata.key_enc_flags = 0;
1991         authdata.key_type = RTA_DATA_IMM;
1992
1993         session->digest_length = xform->auth.digest_length;
1994         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1995                                 DIR_ENC : DIR_DEC;
1996
1997         switch (xform->auth.algo) {
1998         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1999                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2000                 authdata.algmode = OP_ALG_AAI_HMAC;
2001                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2002                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2003                                            1, 0, SHR_NEVER, &authdata,
2004                                            !session->dir,
2005                                            session->digest_length);
2006                 break;
2007         case RTE_CRYPTO_AUTH_MD5_HMAC:
2008                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2009                 authdata.algmode = OP_ALG_AAI_HMAC;
2010                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2011                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2012                                            1, 0, SHR_NEVER, &authdata,
2013                                            !session->dir,
2014                                            session->digest_length);
2015                 break;
2016         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2017                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2018                 authdata.algmode = OP_ALG_AAI_HMAC;
2019                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2020                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2021                                            1, 0, SHR_NEVER, &authdata,
2022                                            !session->dir,
2023                                            session->digest_length);
2024                 break;
2025         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2026                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2027                 authdata.algmode = OP_ALG_AAI_HMAC;
2028                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2029                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2030                                            1, 0, SHR_NEVER, &authdata,
2031                                            !session->dir,
2032                                            session->digest_length);
2033                 break;
2034         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2035                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2036                 authdata.algmode = OP_ALG_AAI_HMAC;
2037                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2038                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2039                                            1, 0, SHR_NEVER, &authdata,
2040                                            !session->dir,
2041                                            session->digest_length);
2042                 break;
2043         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2044                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2045                 authdata.algmode = OP_ALG_AAI_HMAC;
2046                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2047                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2048                                            1, 0, SHR_NEVER, &authdata,
2049                                            !session->dir,
2050                                            session->digest_length);
2051                 break;
2052         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2053                 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2054                 authdata.algmode = OP_ALG_AAI_F9;
2055                 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2056                 session->iv.offset = xform->auth.iv.offset;
2057                 session->iv.length = xform->auth.iv.length;
2058                 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2059                                               1, 0, &authdata,
2060                                               !session->dir,
2061                                               session->digest_length);
2062                 break;
2063         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2064                 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2065                 authdata.algmode = OP_ALG_AAI_F9;
2066                 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2067                 session->iv.offset = xform->auth.iv.offset;
2068                 session->iv.length = xform->auth.iv.length;
2069                 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2070                                            1, 0, &authdata,
2071                                            !session->dir,
2072                                            session->digest_length);
2073                 break;
2074         case RTE_CRYPTO_AUTH_KASUMI_F9:
2075         case RTE_CRYPTO_AUTH_NULL:
2076         case RTE_CRYPTO_AUTH_SHA1:
2077         case RTE_CRYPTO_AUTH_SHA256:
2078         case RTE_CRYPTO_AUTH_SHA512:
2079         case RTE_CRYPTO_AUTH_SHA224:
2080         case RTE_CRYPTO_AUTH_SHA384:
2081         case RTE_CRYPTO_AUTH_MD5:
2082         case RTE_CRYPTO_AUTH_AES_GMAC:
2083         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2084         case RTE_CRYPTO_AUTH_AES_CMAC:
2085         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2086                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2087                               xform->auth.algo);
2088                 goto error_out;
2089         default:
2090                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2091                               xform->auth.algo);
2092                 goto error_out;
2093         }
2094
2095         if (bufsize < 0) {
2096                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2097                 goto error_out;
2098         }
2099
2100         flc->word1_sdl = (uint8_t)bufsize;
2101         session->ctxt = priv;
2102         for (i = 0; i < bufsize; i++)
2103                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2104                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2105
2106
2107         return 0;
2108
2109 error_out:
2110         rte_free(session->auth_key.data);
2111         rte_free(priv);
2112         return -1;
2113 }
2114
/* Initialize an AEAD (AES-GCM) session: copy the key, decide whether the
 * key fits inline in the descriptor or must be referenced by pointer,
 * then build the GCM encap/decap shared descriptor into the FLC.
 *
 * @param dev      crypto device (supplies the FLE mempool)
 * @param xform    AEAD transform (algo, key, IV, digest, AAD length)
 * @param session  session to populate; session->ctxt owns the FLC memory
 * @return 0 on success, -1 on failure (allocations freed on error)
 */
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	/* NOTE(review): with key.length == 0, aead_key.data may be NULL
	 * here and memcpy(NULL, ..., 0) is technically undefined — confirm
	 * zero-length AEAD keys cannot reach this path.
	 */
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (size_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
			      aead_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Ask RTA whether the key can be placed inline in the descriptor;
	 * desc[0]/desc[1] are (ab)used as scratch in/out for the query and
	 * zeroed again before the real descriptor is built below.
	 */
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		/* Key too large to inline: reference it by IOVA instead. */
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	/* Build the direction-specific GCM shared descriptor. */
	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
				&aeaddata, session->iv.length,
				session->digest_length);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	/* Record the shared descriptor length in the flow context. */
	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
2232
2233
2234 static int
2235 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2236                     struct rte_crypto_sym_xform *xform,
2237                     dpaa2_sec_session *session)
2238 {
2239         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2240         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2241         struct alginfo authdata, cipherdata;
2242         int bufsize, i;
2243         struct ctxt_priv *priv;
2244         struct sec_flow_context *flc;
2245         struct rte_crypto_cipher_xform *cipher_xform;
2246         struct rte_crypto_auth_xform *auth_xform;
2247         int err;
2248
2249         PMD_INIT_FUNC_TRACE();
2250
2251         if (session->ext_params.aead_ctxt.auth_cipher_text) {
2252                 cipher_xform = &xform->cipher;
2253                 auth_xform = &xform->next->auth;
2254                 session->ctxt_type =
2255                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2256                         DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2257         } else {
2258                 cipher_xform = &xform->next->cipher;
2259                 auth_xform = &xform->auth;
2260                 session->ctxt_type =
2261                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2262                         DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2263         }
2264
2265         /* Set IV parameters */
2266         session->iv.offset = cipher_xform->iv.offset;
2267         session->iv.length = cipher_xform->iv.length;
2268
2269         /* For SEC AEAD only one descriptor is required */
2270         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2271                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2272                         RTE_CACHE_LINE_SIZE);
2273         if (priv == NULL) {
2274                 DPAA2_SEC_ERR("No Memory for priv CTXT");
2275                 return -1;
2276         }
2277
2278         priv->fle_pool = dev_priv->fle_pool;
2279         flc = &priv->flc_desc[0].flc;
2280
2281         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2282                                                RTE_CACHE_LINE_SIZE);
2283         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2284                 DPAA2_SEC_ERR("No Memory for cipher key");
2285                 rte_free(priv);
2286                 return -1;
2287         }
2288         session->cipher_key.length = cipher_xform->key.length;
2289         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2290                                              RTE_CACHE_LINE_SIZE);
2291         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2292                 DPAA2_SEC_ERR("No Memory for auth key");
2293                 rte_free(session->cipher_key.data);
2294                 rte_free(priv);
2295                 return -1;
2296         }
2297         session->auth_key.length = auth_xform->key.length;
2298         memcpy(session->cipher_key.data, cipher_xform->key.data,
2299                cipher_xform->key.length);
2300         memcpy(session->auth_key.data, auth_xform->key.data,
2301                auth_xform->key.length);
2302
2303         authdata.key = (size_t)session->auth_key.data;
2304         authdata.keylen = session->auth_key.length;
2305         authdata.key_enc_flags = 0;
2306         authdata.key_type = RTA_DATA_IMM;
2307
2308         session->digest_length = auth_xform->digest_length;
2309
2310         switch (auth_xform->algo) {
2311         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2312                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2313                 authdata.algmode = OP_ALG_AAI_HMAC;
2314                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2315                 break;
2316         case RTE_CRYPTO_AUTH_MD5_HMAC:
2317                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2318                 authdata.algmode = OP_ALG_AAI_HMAC;
2319                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2320                 break;
2321         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2322                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2323                 authdata.algmode = OP_ALG_AAI_HMAC;
2324                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2325                 break;
2326         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2327                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2328                 authdata.algmode = OP_ALG_AAI_HMAC;
2329                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2330                 break;
2331         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2332                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2333                 authdata.algmode = OP_ALG_AAI_HMAC;
2334                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2335                 break;
2336         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2337                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2338                 authdata.algmode = OP_ALG_AAI_HMAC;
2339                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2340                 break;
2341         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2342         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2343         case RTE_CRYPTO_AUTH_NULL:
2344         case RTE_CRYPTO_AUTH_SHA1:
2345         case RTE_CRYPTO_AUTH_SHA256:
2346         case RTE_CRYPTO_AUTH_SHA512:
2347         case RTE_CRYPTO_AUTH_SHA224:
2348         case RTE_CRYPTO_AUTH_SHA384:
2349         case RTE_CRYPTO_AUTH_MD5:
2350         case RTE_CRYPTO_AUTH_AES_GMAC:
2351         case RTE_CRYPTO_AUTH_KASUMI_F9:
2352         case RTE_CRYPTO_AUTH_AES_CMAC:
2353         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2354         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2355                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2356                               auth_xform->algo);
2357                 goto error_out;
2358         default:
2359                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2360                               auth_xform->algo);
2361                 goto error_out;
2362         }
2363         cipherdata.key = (size_t)session->cipher_key.data;
2364         cipherdata.keylen = session->cipher_key.length;
2365         cipherdata.key_enc_flags = 0;
2366         cipherdata.key_type = RTA_DATA_IMM;
2367
2368         switch (cipher_xform->algo) {
2369         case RTE_CRYPTO_CIPHER_AES_CBC:
2370                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2371                 cipherdata.algmode = OP_ALG_AAI_CBC;
2372                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2373                 break;
2374         case RTE_CRYPTO_CIPHER_3DES_CBC:
2375                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2376                 cipherdata.algmode = OP_ALG_AAI_CBC;
2377                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2378                 break;
2379         case RTE_CRYPTO_CIPHER_AES_CTR:
2380                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2381                 cipherdata.algmode = OP_ALG_AAI_CTR;
2382                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2383                 break;
2384         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2385         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2386         case RTE_CRYPTO_CIPHER_NULL:
2387         case RTE_CRYPTO_CIPHER_3DES_ECB:
2388         case RTE_CRYPTO_CIPHER_AES_ECB:
2389         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2390                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2391                               cipher_xform->algo);
2392                 goto error_out;
2393         default:
2394                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2395                               cipher_xform->algo);
2396                 goto error_out;
2397         }
2398         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2399                                 DIR_ENC : DIR_DEC;
2400
2401         priv->flc_desc[0].desc[0] = cipherdata.keylen;
2402         priv->flc_desc[0].desc[1] = authdata.keylen;
2403         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2404                                MIN_JOB_DESC_SIZE,
2405                                (unsigned int *)priv->flc_desc[0].desc,
2406                                &priv->flc_desc[0].desc[2], 2);
2407
2408         if (err < 0) {
2409                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2410                 goto error_out;
2411         }
2412         if (priv->flc_desc[0].desc[2] & 1) {
2413                 cipherdata.key_type = RTA_DATA_IMM;
2414         } else {
2415                 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2416                 cipherdata.key_type = RTA_DATA_PTR;
2417         }
2418         if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2419                 authdata.key_type = RTA_DATA_IMM;
2420         } else {
2421                 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2422                 authdata.key_type = RTA_DATA_PTR;
2423         }
2424         priv->flc_desc[0].desc[0] = 0;
2425         priv->flc_desc[0].desc[1] = 0;
2426         priv->flc_desc[0].desc[2] = 0;
2427
2428         if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2429                 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2430                                               0, SHR_SERIAL,
2431                                               &cipherdata, &authdata,
2432                                               session->iv.length,
2433                                               ctxt->auth_only_len,
2434                                               session->digest_length,
2435                                               session->dir);
2436                 if (bufsize < 0) {
2437                         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2438                         goto error_out;
2439                 }
2440         } else {
2441                 DPAA2_SEC_ERR("Hash before cipher not supported");
2442                 goto error_out;
2443         }
2444
2445         flc->word1_sdl = (uint8_t)bufsize;
2446         session->ctxt = priv;
2447         for (i = 0; i < bufsize; i++)
2448                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2449                             i, priv->flc_desc[0].desc[i]);
2450
2451         return 0;
2452
2453 error_out:
2454         rte_free(session->cipher_key.data);
2455         rte_free(session->auth_key.data);
2456         rte_free(priv);
2457         return -1;
2458 }
2459
2460 static int
2461 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2462                             struct rte_crypto_sym_xform *xform, void *sess)
2463 {
2464         dpaa2_sec_session *session = sess;
2465         int ret;
2466
2467         PMD_INIT_FUNC_TRACE();
2468
2469         if (unlikely(sess == NULL)) {
2470                 DPAA2_SEC_ERR("Invalid session struct");
2471                 return -1;
2472         }
2473
2474         memset(session, 0, sizeof(dpaa2_sec_session));
2475         /* Default IV length = 0 */
2476         session->iv.length = 0;
2477
2478         /* Cipher Only */
2479         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2480                 session->ctxt_type = DPAA2_SEC_CIPHER;
2481                 ret = dpaa2_sec_cipher_init(dev, xform, session);
2482
2483         /* Authentication Only */
2484         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2485                    xform->next == NULL) {
2486                 session->ctxt_type = DPAA2_SEC_AUTH;
2487                 ret = dpaa2_sec_auth_init(dev, xform, session);
2488
2489         /* Cipher then Authenticate */
2490         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2491                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2492                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2493                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2494
2495         /* Authenticate then Cipher */
2496         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2497                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2498                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2499                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2500
2501         /* AEAD operation for AES-GCM kind of Algorithms */
2502         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2503                    xform->next == NULL) {
2504                 ret = dpaa2_sec_aead_init(dev, xform, session);
2505
2506         } else {
2507                 DPAA2_SEC_ERR("Invalid crypto type");
2508                 return -EINVAL;
2509         }
2510
2511         return ret;
2512 }
2513
2514 static int
2515 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2516                         dpaa2_sec_session *session,
2517                         struct alginfo *aeaddata)
2518 {
2519         PMD_INIT_FUNC_TRACE();
2520
2521         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2522                                                RTE_CACHE_LINE_SIZE);
2523         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2524                 DPAA2_SEC_ERR("No Memory for aead key");
2525                 return -1;
2526         }
2527         memcpy(session->aead_key.data, aead_xform->key.data,
2528                aead_xform->key.length);
2529
2530         session->digest_length = aead_xform->digest_length;
2531         session->aead_key.length = aead_xform->key.length;
2532
2533         aeaddata->key = (size_t)session->aead_key.data;
2534         aeaddata->keylen = session->aead_key.length;
2535         aeaddata->key_enc_flags = 0;
2536         aeaddata->key_type = RTA_DATA_IMM;
2537
2538         switch (aead_xform->algo) {
2539         case RTE_CRYPTO_AEAD_AES_GCM:
2540                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2541                 aeaddata->algmode = OP_ALG_AAI_GCM;
2542                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2543                 break;
2544         case RTE_CRYPTO_AEAD_AES_CCM:
2545                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2546                 aeaddata->algmode = OP_ALG_AAI_CCM;
2547                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2548                 break;
2549         default:
2550                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2551                               aead_xform->algo);
2552                 return -1;
2553         }
2554         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2555                                 DIR_ENC : DIR_DEC;
2556
2557         return 0;
2558 }
2559
2560 static int
2561 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2562         struct rte_crypto_auth_xform *auth_xform,
2563         dpaa2_sec_session *session,
2564         struct alginfo *cipherdata,
2565         struct alginfo *authdata)
2566 {
2567         if (cipher_xform) {
2568                 session->cipher_key.data = rte_zmalloc(NULL,
2569                                                        cipher_xform->key.length,
2570                                                        RTE_CACHE_LINE_SIZE);
2571                 if (session->cipher_key.data == NULL &&
2572                                 cipher_xform->key.length > 0) {
2573                         DPAA2_SEC_ERR("No Memory for cipher key");
2574                         return -ENOMEM;
2575                 }
2576
2577                 session->cipher_key.length = cipher_xform->key.length;
2578                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2579                                 cipher_xform->key.length);
2580                 session->cipher_alg = cipher_xform->algo;
2581         } else {
2582                 session->cipher_key.data = NULL;
2583                 session->cipher_key.length = 0;
2584                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2585         }
2586
2587         if (auth_xform) {
2588                 session->auth_key.data = rte_zmalloc(NULL,
2589                                                 auth_xform->key.length,
2590                                                 RTE_CACHE_LINE_SIZE);
2591                 if (session->auth_key.data == NULL &&
2592                                 auth_xform->key.length > 0) {
2593                         DPAA2_SEC_ERR("No Memory for auth key");
2594                         return -ENOMEM;
2595                 }
2596                 session->auth_key.length = auth_xform->key.length;
2597                 memcpy(session->auth_key.data, auth_xform->key.data,
2598                                 auth_xform->key.length);
2599                 session->auth_alg = auth_xform->algo;
2600         } else {
2601                 session->auth_key.data = NULL;
2602                 session->auth_key.length = 0;
2603                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2604         }
2605
2606         authdata->key = (size_t)session->auth_key.data;
2607         authdata->keylen = session->auth_key.length;
2608         authdata->key_enc_flags = 0;
2609         authdata->key_type = RTA_DATA_IMM;
2610         switch (session->auth_alg) {
2611         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2612                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2613                 authdata->algmode = OP_ALG_AAI_HMAC;
2614                 break;
2615         case RTE_CRYPTO_AUTH_MD5_HMAC:
2616                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2617                 authdata->algmode = OP_ALG_AAI_HMAC;
2618                 break;
2619         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2620                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2621                 authdata->algmode = OP_ALG_AAI_HMAC;
2622                 break;
2623         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2624                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2625                 authdata->algmode = OP_ALG_AAI_HMAC;
2626                 break;
2627         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2628                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2629                 authdata->algmode = OP_ALG_AAI_HMAC;
2630                 break;
2631         case RTE_CRYPTO_AUTH_AES_CMAC:
2632                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2633                 break;
2634         case RTE_CRYPTO_AUTH_NULL:
2635                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2636                 break;
2637         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2638         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2639         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2640         case RTE_CRYPTO_AUTH_SHA1:
2641         case RTE_CRYPTO_AUTH_SHA256:
2642         case RTE_CRYPTO_AUTH_SHA512:
2643         case RTE_CRYPTO_AUTH_SHA224:
2644         case RTE_CRYPTO_AUTH_SHA384:
2645         case RTE_CRYPTO_AUTH_MD5:
2646         case RTE_CRYPTO_AUTH_AES_GMAC:
2647         case RTE_CRYPTO_AUTH_KASUMI_F9:
2648         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2649         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2650                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2651                               session->auth_alg);
2652                 return -1;
2653         default:
2654                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2655                               session->auth_alg);
2656                 return -1;
2657         }
2658         cipherdata->key = (size_t)session->cipher_key.data;
2659         cipherdata->keylen = session->cipher_key.length;
2660         cipherdata->key_enc_flags = 0;
2661         cipherdata->key_type = RTA_DATA_IMM;
2662
2663         switch (session->cipher_alg) {
2664         case RTE_CRYPTO_CIPHER_AES_CBC:
2665                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2666                 cipherdata->algmode = OP_ALG_AAI_CBC;
2667                 break;
2668         case RTE_CRYPTO_CIPHER_3DES_CBC:
2669                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2670                 cipherdata->algmode = OP_ALG_AAI_CBC;
2671                 break;
2672         case RTE_CRYPTO_CIPHER_AES_CTR:
2673                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2674                 cipherdata->algmode = OP_ALG_AAI_CTR;
2675                 break;
2676         case RTE_CRYPTO_CIPHER_NULL:
2677                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2678                 break;
2679         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2680         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2681         case RTE_CRYPTO_CIPHER_3DES_ECB:
2682         case RTE_CRYPTO_CIPHER_AES_ECB:
2683         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2684                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2685                               session->cipher_alg);
2686                 return -1;
2687         default:
2688                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2689                               session->cipher_alg);
2690                 return -1;
2691         }
2692
2693         return 0;
2694 }
2695
#ifdef RTE_LIBRTE_SECURITY_TEST
/* Fixed 16-byte AES-CBC IV (0x00..0x0f), compiled in only for the
 * built-in security self-test.
 */
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
2701
2702 static int
2703 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2704                             struct rte_security_session_conf *conf,
2705                             void *sess)
2706 {
2707         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2708         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2709         struct rte_crypto_auth_xform *auth_xform = NULL;
2710         struct rte_crypto_aead_xform *aead_xform = NULL;
2711         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2712         struct ctxt_priv *priv;
2713         struct ipsec_encap_pdb encap_pdb;
2714         struct ipsec_decap_pdb decap_pdb;
2715         struct alginfo authdata, cipherdata;
2716         int bufsize;
2717         struct sec_flow_context *flc;
2718         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2719         int ret = -1;
2720
2721         PMD_INIT_FUNC_TRACE();
2722
2723         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2724                                 sizeof(struct ctxt_priv) +
2725                                 sizeof(struct sec_flc_desc),
2726                                 RTE_CACHE_LINE_SIZE);
2727
2728         if (priv == NULL) {
2729                 DPAA2_SEC_ERR("No memory for priv CTXT");
2730                 return -ENOMEM;
2731         }
2732
2733         priv->fle_pool = dev_priv->fle_pool;
2734         flc = &priv->flc_desc[0].flc;
2735
2736         memset(session, 0, sizeof(dpaa2_sec_session));
2737
2738         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2739                 cipher_xform = &conf->crypto_xform->cipher;
2740                 if (conf->crypto_xform->next)
2741                         auth_xform = &conf->crypto_xform->next->auth;
2742                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2743                                         session, &cipherdata, &authdata);
2744         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2745                 auth_xform = &conf->crypto_xform->auth;
2746                 if (conf->crypto_xform->next)
2747                         cipher_xform = &conf->crypto_xform->next->cipher;
2748                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2749                                         session, &cipherdata, &authdata);
2750         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2751                 aead_xform = &conf->crypto_xform->aead;
2752                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2753                                         session, &cipherdata);
2754         } else {
2755                 DPAA2_SEC_ERR("XFORM not specified");
2756                 ret = -EINVAL;
2757                 goto out;
2758         }
2759         if (ret) {
2760                 DPAA2_SEC_ERR("Failed to process xform");
2761                 goto out;
2762         }
2763
2764         session->ctxt_type = DPAA2_SEC_IPSEC;
2765         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2766                 uint8_t *hdr = NULL;
2767                 struct ip ip4_hdr;
2768                 struct rte_ipv6_hdr ip6_hdr;
2769
2770                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2771                 /* For Sec Proto only one descriptor is required. */
2772                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2773                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2774                         PDBOPTS_ESP_OIHI_PDB_INL |
2775                         PDBOPTS_ESP_IVSRC |
2776                         PDBHMO_ESP_ENCAP_DTTL |
2777                         PDBHMO_ESP_SNR;
2778                 if (ipsec_xform->options.esn)
2779                         encap_pdb.options |= PDBOPTS_ESP_ESN;
2780                 encap_pdb.spi = ipsec_xform->spi;
2781                 session->dir = DIR_ENC;
2782                 if (ipsec_xform->tunnel.type ==
2783                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2784                         encap_pdb.ip_hdr_len = sizeof(struct ip);
2785                         ip4_hdr.ip_v = IPVERSION;
2786                         ip4_hdr.ip_hl = 5;
2787                         ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2788                         ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2789                         ip4_hdr.ip_id = 0;
2790                         ip4_hdr.ip_off = 0;
2791                         ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2792                         ip4_hdr.ip_p = IPPROTO_ESP;
2793                         ip4_hdr.ip_sum = 0;
2794                         ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2795                         ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2796                         ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2797                                         &ip4_hdr, sizeof(struct ip));
2798                         hdr = (uint8_t *)&ip4_hdr;
2799                 } else if (ipsec_xform->tunnel.type ==
2800                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2801                         ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2802                                 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2803                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2804                                         RTE_IPV6_HDR_TC_SHIFT) &
2805                                         RTE_IPV6_HDR_TC_MASK) |
2806                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2807                                         RTE_IPV6_HDR_FL_SHIFT) &
2808                                         RTE_IPV6_HDR_FL_MASK));
2809                         /* Payload length will be updated by HW */
2810                         ip6_hdr.payload_len = 0;
2811                         ip6_hdr.hop_limits =
2812                                         ipsec_xform->tunnel.ipv6.hlimit;
2813                         ip6_hdr.proto = (ipsec_xform->proto ==
2814                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2815                                         IPPROTO_ESP : IPPROTO_AH;
2816                         memcpy(&ip6_hdr.src_addr,
2817                                 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2818                         memcpy(&ip6_hdr.dst_addr,
2819                                 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2820                         encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2821                         hdr = (uint8_t *)&ip6_hdr;
2822                 }
2823
2824                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2825                                 1, 0, SHR_SERIAL, &encap_pdb,
2826                                 hdr, &cipherdata, &authdata);
2827         } else if (ipsec_xform->direction ==
2828                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2829                 flc->dhr = SEC_FLC_DHR_INBOUND;
2830                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2831                 decap_pdb.options = sizeof(struct ip) << 16;
2832                 if (ipsec_xform->options.esn)
2833                         decap_pdb.options |= PDBOPTS_ESP_ESN;
2834                 decap_pdb.options = (ipsec_xform->tunnel.type ==
2835                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2836                                 sizeof(struct ip) << 16 :
2837                                 sizeof(struct rte_ipv6_hdr) << 16;
2838                 session->dir = DIR_DEC;
2839                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2840                                 1, 0, SHR_SERIAL,
2841                                 &decap_pdb, &cipherdata, &authdata);
2842         } else
2843                 goto out;
2844
2845         if (bufsize < 0) {
2846                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2847                 goto out;
2848         }
2849
2850         flc->word1_sdl = (uint8_t)bufsize;
2851
2852         /* Enable the stashing control bit */
2853         DPAA2_SET_FLC_RSC(flc);
2854         flc->word2_rflc_31_0 = lower_32_bits(
2855                         (size_t)&(((struct dpaa2_sec_qp *)
2856                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2857         flc->word3_rflc_63_32 = upper_32_bits(
2858                         (size_t)&(((struct dpaa2_sec_qp *)
2859                         dev->data->queue_pairs[0])->rx_vq));
2860
2861         /* Set EWS bit i.e. enable write-safe */
2862         DPAA2_SET_FLC_EWS(flc);
2863         /* Set BS = 1 i.e reuse input buffers as output buffers */
2864         DPAA2_SET_FLC_REUSE_BS(flc);
2865         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2866         DPAA2_SET_FLC_REUSE_FF(flc);
2867
2868         session->ctxt = priv;
2869
2870         return 0;
2871 out:
2872         rte_free(session->auth_key.data);
2873         rte_free(session->cipher_key.data);
2874         rte_free(priv);
2875         return ret;
2876 }
2877
2878 static int
2879 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2880                            struct rte_security_session_conf *conf,
2881                            void *sess)
2882 {
2883         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2884         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2885         struct rte_crypto_auth_xform *auth_xform = NULL;
2886         struct rte_crypto_cipher_xform *cipher_xform;
2887         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2888         struct ctxt_priv *priv;
2889         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2890         struct alginfo authdata, cipherdata;
2891         struct alginfo *p_authdata = NULL;
2892         int bufsize = -1;
2893         struct sec_flow_context *flc;
2894 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2895         int swap = true;
2896 #else
2897         int swap = false;
2898 #endif
2899
2900         PMD_INIT_FUNC_TRACE();
2901
2902         memset(session, 0, sizeof(dpaa2_sec_session));
2903
2904         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2905                                 sizeof(struct ctxt_priv) +
2906                                 sizeof(struct sec_flc_desc),
2907                                 RTE_CACHE_LINE_SIZE);
2908
2909         if (priv == NULL) {
2910                 DPAA2_SEC_ERR("No memory for priv CTXT");
2911                 return -ENOMEM;
2912         }
2913
2914         priv->fle_pool = dev_priv->fle_pool;
2915         flc = &priv->flc_desc[0].flc;
2916
2917         /* find xfrm types */
2918         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2919                 cipher_xform = &xform->cipher;
2920         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2921                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2922                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2923                 cipher_xform = &xform->cipher;
2924                 auth_xform = &xform->next->auth;
2925         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2926                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2927                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2928                 cipher_xform = &xform->next->cipher;
2929                 auth_xform = &xform->auth;
2930         } else {
2931                 DPAA2_SEC_ERR("Invalid crypto type");
2932                 return -EINVAL;
2933         }
2934
2935         session->ctxt_type = DPAA2_SEC_PDCP;
2936         if (cipher_xform) {
2937                 session->cipher_key.data = rte_zmalloc(NULL,
2938                                                cipher_xform->key.length,
2939                                                RTE_CACHE_LINE_SIZE);
2940                 if (session->cipher_key.data == NULL &&
2941                                 cipher_xform->key.length > 0) {
2942                         DPAA2_SEC_ERR("No Memory for cipher key");
2943                         rte_free(priv);
2944                         return -ENOMEM;
2945                 }
2946                 session->cipher_key.length = cipher_xform->key.length;
2947                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2948                         cipher_xform->key.length);
2949                 session->dir =
2950                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2951                                         DIR_ENC : DIR_DEC;
2952                 session->cipher_alg = cipher_xform->algo;
2953         } else {
2954                 session->cipher_key.data = NULL;
2955                 session->cipher_key.length = 0;
2956                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2957                 session->dir = DIR_ENC;
2958         }
2959
2960         session->pdcp.domain = pdcp_xform->domain;
2961         session->pdcp.bearer = pdcp_xform->bearer;
2962         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2963         session->pdcp.sn_size = pdcp_xform->sn_size;
2964         session->pdcp.hfn = pdcp_xform->hfn;
2965         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2966         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2967         /* hfv ovd offset location is stored in iv.offset value*/
2968         session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2969
2970         cipherdata.key = (size_t)session->cipher_key.data;
2971         cipherdata.keylen = session->cipher_key.length;
2972         cipherdata.key_enc_flags = 0;
2973         cipherdata.key_type = RTA_DATA_IMM;
2974
2975         switch (session->cipher_alg) {
2976         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2977                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2978                 break;
2979         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2980                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2981                 break;
2982         case RTE_CRYPTO_CIPHER_AES_CTR:
2983                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2984                 break;
2985         case RTE_CRYPTO_CIPHER_NULL:
2986                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2987                 break;
2988         default:
2989                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2990                               session->cipher_alg);
2991                 goto out;
2992         }
2993
2994         if (auth_xform) {
2995                 session->auth_key.data = rte_zmalloc(NULL,
2996                                                      auth_xform->key.length,
2997                                                      RTE_CACHE_LINE_SIZE);
2998                 if (!session->auth_key.data &&
2999                     auth_xform->key.length > 0) {
3000                         DPAA2_SEC_ERR("No Memory for auth key");
3001                         rte_free(session->cipher_key.data);
3002                         rte_free(priv);
3003                         return -ENOMEM;
3004                 }
3005                 session->auth_key.length = auth_xform->key.length;
3006                 memcpy(session->auth_key.data, auth_xform->key.data,
3007                        auth_xform->key.length);
3008                 session->auth_alg = auth_xform->algo;
3009         } else {
3010                 session->auth_key.data = NULL;
3011                 session->auth_key.length = 0;
3012                 session->auth_alg = 0;
3013         }
3014         authdata.key = (size_t)session->auth_key.data;
3015         authdata.keylen = session->auth_key.length;
3016         authdata.key_enc_flags = 0;
3017         authdata.key_type = RTA_DATA_IMM;
3018
3019         if (session->auth_alg) {
3020                 switch (session->auth_alg) {
3021                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3022                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3023                         break;
3024                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3025                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3026                         break;
3027                 case RTE_CRYPTO_AUTH_AES_CMAC:
3028                         authdata.algtype = PDCP_AUTH_TYPE_AES;
3029                         break;
3030                 case RTE_CRYPTO_AUTH_NULL:
3031                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
3032                         break;
3033                 default:
3034                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3035                                       session->auth_alg);
3036                         goto out;
3037                 }
3038
3039                 p_authdata = &authdata;
3040         } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3041                 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3042                 goto out;
3043         }
3044
3045         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3046                 if (session->dir == DIR_ENC)
3047                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3048                                         priv->flc_desc[0].desc, 1, swap,
3049                                         pdcp_xform->hfn,
3050                                         session->pdcp.sn_size,
3051                                         pdcp_xform->bearer,
3052                                         pdcp_xform->pkt_dir,
3053                                         pdcp_xform->hfn_threshold,
3054                                         &cipherdata, &authdata,
3055                                         0);
3056                 else if (session->dir == DIR_DEC)
3057                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3058                                         priv->flc_desc[0].desc, 1, swap,
3059                                         pdcp_xform->hfn,
3060                                         session->pdcp.sn_size,
3061                                         pdcp_xform->bearer,
3062                                         pdcp_xform->pkt_dir,
3063                                         pdcp_xform->hfn_threshold,
3064                                         &cipherdata, &authdata,
3065                                         0);
3066         } else {
3067                 if (session->dir == DIR_ENC)
3068                         bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3069                                         priv->flc_desc[0].desc, 1, swap,
3070                                         session->pdcp.sn_size,
3071                                         pdcp_xform->hfn,
3072                                         pdcp_xform->bearer,
3073                                         pdcp_xform->pkt_dir,
3074                                         pdcp_xform->hfn_threshold,
3075                                         &cipherdata, p_authdata, 0);
3076                 else if (session->dir == DIR_DEC)
3077                         bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3078                                         priv->flc_desc[0].desc, 1, swap,
3079                                         session->pdcp.sn_size,
3080                                         pdcp_xform->hfn,
3081                                         pdcp_xform->bearer,
3082                                         pdcp_xform->pkt_dir,
3083                                         pdcp_xform->hfn_threshold,
3084                                         &cipherdata, p_authdata, 0);
3085         }
3086
3087         if (bufsize < 0) {
3088                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3089                 goto out;
3090         }
3091
3092         /* Enable the stashing control bit */
3093         DPAA2_SET_FLC_RSC(flc);
3094         flc->word2_rflc_31_0 = lower_32_bits(
3095                         (size_t)&(((struct dpaa2_sec_qp *)
3096                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
3097         flc->word3_rflc_63_32 = upper_32_bits(
3098                         (size_t)&(((struct dpaa2_sec_qp *)
3099                         dev->data->queue_pairs[0])->rx_vq));
3100
3101         flc->word1_sdl = (uint8_t)bufsize;
3102
3103         /* TODO - check the perf impact or
3104          * align as per descriptor type
3105          * Set EWS bit i.e. enable write-safe
3106          * DPAA2_SET_FLC_EWS(flc);
3107          */
3108
3109         /* Set BS = 1 i.e reuse input buffers as output buffers */
3110         DPAA2_SET_FLC_REUSE_BS(flc);
3111         /* Set FF = 10; reuse input buffers if they provide sufficient space */
3112         DPAA2_SET_FLC_REUSE_FF(flc);
3113
3114         session->ctxt = priv;
3115
3116         return 0;
3117 out:
3118         rte_free(session->auth_key.data);
3119         rte_free(session->cipher_key.data);
3120         rte_free(priv);
3121         return -1;
3122 }
3123
3124 static int
3125 dpaa2_sec_security_session_create(void *dev,
3126                                   struct rte_security_session_conf *conf,
3127                                   struct rte_security_session *sess,
3128                                   struct rte_mempool *mempool)
3129 {
3130         void *sess_private_data;
3131         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3132         int ret;
3133
3134         if (rte_mempool_get(mempool, &sess_private_data)) {
3135                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3136                 return -ENOMEM;
3137         }
3138
3139         switch (conf->protocol) {
3140         case RTE_SECURITY_PROTOCOL_IPSEC:
3141                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3142                                 sess_private_data);
3143                 break;
3144         case RTE_SECURITY_PROTOCOL_MACSEC:
3145                 return -ENOTSUP;
3146         case RTE_SECURITY_PROTOCOL_PDCP:
3147                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3148                                 sess_private_data);
3149                 break;
3150         default:
3151                 return -EINVAL;
3152         }
3153         if (ret != 0) {
3154                 DPAA2_SEC_ERR("Failed to configure session parameters");
3155                 /* Return session to mempool */
3156                 rte_mempool_put(mempool, sess_private_data);
3157                 return ret;
3158         }
3159
3160         set_sec_session_private_data(sess, sess_private_data);
3161
3162         return ret;
3163 }
3164
3165 /** Clear the memory of session so it doesn't leave key material behind */
3166 static int
3167 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3168                 struct rte_security_session *sess)
3169 {
3170         PMD_INIT_FUNC_TRACE();
3171         void *sess_priv = get_sec_session_private_data(sess);
3172
3173         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3174
3175         if (sess_priv) {
3176                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3177
3178                 rte_free(s->ctxt);
3179                 rte_free(s->cipher_key.data);
3180                 rte_free(s->auth_key.data);
3181                 memset(s, 0, sizeof(dpaa2_sec_session));
3182                 set_sec_session_private_data(sess, NULL);
3183                 rte_mempool_put(sess_mp, sess_priv);
3184         }
3185         return 0;
3186 }
3187
3188 static int
3189 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3190                 struct rte_crypto_sym_xform *xform,
3191                 struct rte_cryptodev_sym_session *sess,
3192                 struct rte_mempool *mempool)
3193 {
3194         void *sess_private_data;
3195         int ret;
3196
3197         if (rte_mempool_get(mempool, &sess_private_data)) {
3198                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3199                 return -ENOMEM;
3200         }
3201
3202         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3203         if (ret != 0) {
3204                 DPAA2_SEC_ERR("Failed to configure session parameters");
3205                 /* Return session to mempool */
3206                 rte_mempool_put(mempool, sess_private_data);
3207                 return ret;
3208         }
3209
3210         set_sym_session_private_data(sess, dev->driver_id,
3211                 sess_private_data);
3212
3213         return 0;
3214 }
3215
3216 /** Clear the memory of session so it doesn't leave key material behind */
3217 static void
3218 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3219                 struct rte_cryptodev_sym_session *sess)
3220 {
3221         PMD_INIT_FUNC_TRACE();
3222         uint8_t index = dev->driver_id;
3223         void *sess_priv = get_sym_session_private_data(sess, index);
3224         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3225
3226         if (sess_priv) {
3227                 rte_free(s->ctxt);
3228                 rte_free(s->cipher_key.data);
3229                 rte_free(s->auth_key.data);
3230                 memset(s, 0, sizeof(dpaa2_sec_session));
3231                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3232                 set_sym_session_private_data(sess, index, NULL);
3233                 rte_mempool_put(sess_mp, sess_priv);
3234         }
3235 }
3236
/* Device configure callback.  The DPAA2 SEC engine needs no per-device
 * runtime configuration beyond what dev_init already programmed, so this
 * is intentionally a no-op that always succeeds.
 */
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
3245
3246 static int
3247 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3248 {
3249         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3250         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3251         struct dpseci_attr attr;
3252         struct dpaa2_queue *dpaa2_q;
3253         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3254                                         dev->data->queue_pairs;
3255         struct dpseci_rx_queue_attr rx_attr;
3256         struct dpseci_tx_queue_attr tx_attr;
3257         int ret, i;
3258
3259         PMD_INIT_FUNC_TRACE();
3260
3261         memset(&attr, 0, sizeof(struct dpseci_attr));
3262
3263         ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3264         if (ret) {
3265                 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3266                               priv->hw_id);
3267                 goto get_attr_failure;
3268         }
3269         ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3270         if (ret) {
3271                 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3272                 goto get_attr_failure;
3273         }
3274         for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3275                 dpaa2_q = &qp[i]->rx_vq;
3276                 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3277                                     &rx_attr);
3278                 dpaa2_q->fqid = rx_attr.fqid;
3279                 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3280         }
3281         for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3282                 dpaa2_q = &qp[i]->tx_vq;
3283                 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3284                                     &tx_attr);
3285                 dpaa2_q->fqid = tx_attr.fqid;
3286                 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3287         }
3288
3289         return 0;
3290 get_attr_failure:
3291         dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3292         return -1;
3293 }
3294
3295 static void
3296 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3297 {
3298         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3299         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3300         int ret;
3301
3302         PMD_INIT_FUNC_TRACE();
3303
3304         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3305         if (ret) {
3306                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3307                              priv->hw_id);
3308                 return;
3309         }
3310
3311         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3312         if (ret < 0) {
3313                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3314                 return;
3315         }
3316 }
3317
3318 static int
3319 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3320 {
3321         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3322         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3323         int ret;
3324
3325         PMD_INIT_FUNC_TRACE();
3326
3327         /* Function is reverse of dpaa2_sec_dev_init.
3328          * It does the following:
3329          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3330          * 2. Close the DPSECI device
3331          * 3. Free the allocated resources.
3332          */
3333
3334         /*Close the device at underlying layer*/
3335         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3336         if (ret) {
3337                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3338                 return -1;
3339         }
3340
3341         /*Free the allocated memory for ethernet private data and dpseci*/
3342         priv->hw = NULL;
3343         rte_free(dpseci);
3344
3345         return 0;
3346 }
3347
3348 static void
3349 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3350                         struct rte_cryptodev_info *info)
3351 {
3352         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3353
3354         PMD_INIT_FUNC_TRACE();
3355         if (info != NULL) {
3356                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3357                 info->feature_flags = dev->feature_flags;
3358                 info->capabilities = dpaa2_sec_capabilities;
3359                 /* No limit of number of sessions */
3360                 info->sym.max_nb_sessions = 0;
3361                 info->driver_id = cryptodev_driver_id;
3362         }
3363 }
3364
/* Stats callback: sum the software per-queue-pair counters into @stats,
 * then additionally log the SEC engine's hardware counters read via the
 * MC (the hardware counters are informational only and are not folded
 * into the rte_cryptodev_stats structure).
 */
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	/* Accumulate software counters; queue pairs may be sparsely set up */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	/* Hardware counters are fetched from the MC and only logged */
	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
			    "\n\tNum of Requests Dequeued = %" PRIu64
			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
3415
3416 static
3417 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3418 {
3419         int i;
3420         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3421                                    (dev->data->queue_pairs);
3422
3423         PMD_INIT_FUNC_TRACE();
3424
3425         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3426                 if (qp[i] == NULL) {
3427                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3428                         continue;
3429                 }
3430                 qp[i]->tx_vq.rx_pkts = 0;
3431                 qp[i]->tx_vq.tx_pkts = 0;
3432                 qp[i]->tx_vq.err_pkts = 0;
3433                 qp[i]->rx_vq.rx_pkts = 0;
3434                 qp[i]->rx_vq.tx_pkts = 0;
3435                 qp[i]->rx_vq.err_pkts = 0;
3436         }
3437 }
3438
/* Hot-path dequeue callback for RTE_SCHED_TYPE_PARALLEL event queues:
 * converts a received frame descriptor into a CRYPTODEV completion
 * event, filling the event fields from the template stored on the Rx
 * queue at eventq-attach time.
 */
static void __attribute__((hot))
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Copy the event template programmed at eventq attach */
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	/* Translate the FD back into the completed operation */
	ev->event_ptr = sec_fd_to_mbuf(fd);

	/* Parallel mode needs no order restoration: consume the DQRR
	 * entry immediately.
	 */
	qbman_swp_dqrr_consume(swp, dq);
}
3464 static void
3465 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3466                                  const struct qbman_fd *fd,
3467                                  const struct qbman_result *dq,
3468                                  struct dpaa2_queue *rxq,
3469                                  struct rte_event *ev)
3470 {
3471         uint8_t dqrr_index;
3472         struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3473         /* Prefetching mbuf */
3474         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3475                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3476
3477         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3478         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3479
3480         ev->flow_id = rxq->ev.flow_id;
3481         ev->sub_event_type = rxq->ev.sub_event_type;
3482         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3483         ev->op = RTE_EVENT_OP_NEW;
3484         ev->sched_type = rxq->ev.sched_type;
3485         ev->queue_id = rxq->ev.queue_id;
3486         ev->priority = rxq->ev.priority;
3487
3488         ev->event_ptr = sec_fd_to_mbuf(fd);
3489         dqrr_index = qbman_get_dqrr_idx(dq);
3490         crypto_op->sym->m_src->seqn = dqrr_index + 1;
3491         DPAA2_PER_LCORE_DQRR_SIZE++;
3492         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3493         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3494 }
3495
/* Attach a DPSECI Rx queue to an event-device channel (DPCON) so crypto
 * completions are delivered through the eventdev rather than by polling.
 * Only parallel and atomic scheduling are supported.
 *
 * @param dev       crypto device whose queue pair is being attached
 * @param qp_id     queue pair index
 * @param dpcon_id  hardware id of the destination DPCON
 * @param event     event template (sched type, priority, queue id, ...)
 * @return 0 on success, -EINVAL for an unsupported sched type, or the
 *         error from dpseci_set_rx_queue()
 */
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t dpcon_id,
		const struct rte_event *event)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct dpseci_rx_queue_cfg cfg;
	int ret;

	/* Select the dequeue callback matching the scheduling mode */
	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
	else
		return -EINVAL;

	/* Route the Rx queue to the DPCON at the requested priority */
	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
	cfg.dest_cfg.dest_id = dpcon_id;
	cfg.dest_cfg.priority = event->priority;

	/* user_ctx lets the dequeue path recover the qp from the DQ entry */
	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(qp);
	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Atomic mode relies on hardware order preservation */
		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
		cfg.order_preservation_en = 1;
	}
	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
		return ret;
	}

	/* Save the template; the dequeue callbacks copy fields from it */
	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));

	return 0;
}
3538
3539 int
3540 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3541                         int qp_id)
3542 {
3543         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3544         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3545         struct dpseci_rx_queue_cfg cfg;
3546         int ret;
3547
3548         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3549         cfg.options = DPSECI_QUEUE_OPT_DEST;
3550         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3551
3552         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3553                                   qp_id, &cfg);
3554         if (ret)
3555                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3556
3557         return ret;
3558 }
3559
/* Cryptodev operations table registered for the DPAA2 SEC PMD; wired to
 * the device in dpaa2_sec_dev_init().
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
3575
/* rte_security capabilities_get callback: return the PMD's static
 * security capability table (device-independent).
 */
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}
3581
/* rte_security operations supported by the DPAA2 SEC PMD.  Session
 * update, per-session stats, and inline packet metadata are not
 * implemented, hence the NULL entries.
 */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
3590
3591 static int
3592 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3593 {
3594         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3595
3596         rte_free(dev->security_ctx);
3597
3598         rte_mempool_free(internals->fle_pool);
3599
3600         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3601                        dev->data->name, rte_socket_id());
3602
3603         return 0;
3604 }
3605
3606 static int
3607 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3608 {
3609         struct dpaa2_sec_dev_private *internals;
3610         struct rte_device *dev = cryptodev->device;
3611         struct rte_dpaa2_device *dpaa2_dev;
3612         struct rte_security_ctx *security_instance;
3613         struct fsl_mc_io *dpseci;
3614         uint16_t token;
3615         struct dpseci_attr attr;
3616         int retcode, hw_id;
3617         char str[30];
3618
3619         PMD_INIT_FUNC_TRACE();
3620         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3621         if (dpaa2_dev == NULL) {
3622                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3623                 return -1;
3624         }
3625         hw_id = dpaa2_dev->object_id;
3626
3627         cryptodev->driver_id = cryptodev_driver_id;
3628         cryptodev->dev_ops = &crypto_ops;
3629
3630         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3631         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3632         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3633                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3634                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3635                         RTE_CRYPTODEV_FF_SECURITY |
3636                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3637                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3638                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3639                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3640                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3641
3642         internals = cryptodev->data->dev_private;
3643
3644         /*
3645          * For secondary processes, we don't initialise any further as primary
3646          * has already done this work. Only check we don't need a different
3647          * RX function
3648          */
3649         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3650                 DPAA2_SEC_DEBUG("Device already init by primary process");
3651                 return 0;
3652         }
3653
3654         /* Initialize security_ctx only for primary process*/
3655         security_instance = rte_malloc("rte_security_instances_ops",
3656                                 sizeof(struct rte_security_ctx), 0);
3657         if (security_instance == NULL)
3658                 return -ENOMEM;
3659         security_instance->device = (void *)cryptodev;
3660         security_instance->ops = &dpaa2_sec_security_ops;
3661         security_instance->sess_cnt = 0;
3662         cryptodev->security_ctx = security_instance;
3663
3664         /*Open the rte device via MC and save the handle for further use*/
3665         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3666                                 sizeof(struct fsl_mc_io), 0);
3667         if (!dpseci) {
3668                 DPAA2_SEC_ERR(
3669                         "Error in allocating the memory for dpsec object");
3670                 return -1;
3671         }
3672         dpseci->regs = rte_mcp_ptr_list[0];
3673
3674         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3675         if (retcode != 0) {
3676                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3677                               retcode);
3678                 goto init_error;
3679         }
3680         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3681         if (retcode != 0) {
3682                 DPAA2_SEC_ERR(
3683                              "Cannot get dpsec device attributed: Error = %x",
3684                              retcode);
3685                 goto init_error;
3686         }
3687         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3688                         "dpsec-%u", hw_id);
3689
3690         internals->max_nb_queue_pairs = attr.num_tx_queues;
3691         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3692         internals->hw = dpseci;
3693         internals->token = token;
3694
3695         snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3696                         getpid(), cryptodev->data->dev_id);
3697         internals->fle_pool = rte_mempool_create((const char *)str,
3698                         FLE_POOL_NUM_BUFS,
3699                         FLE_POOL_BUF_SIZE,
3700                         FLE_POOL_CACHE_SIZE, 0,
3701                         NULL, NULL, NULL, NULL,
3702                         SOCKET_ID_ANY, 0);
3703         if (!internals->fle_pool) {
3704                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3705                 goto init_error;
3706         }
3707
3708         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3709         return 0;
3710
3711 init_error:
3712         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3713
3714         /* dpaa2_sec_uninit(crypto_dev_name); */
3715         return -EFAULT;
3716 }
3717
3718 static int
3719 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3720                           struct rte_dpaa2_device *dpaa2_dev)
3721 {
3722         struct rte_cryptodev *cryptodev;
3723         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3724
3725         int retval;
3726
3727         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3728                         dpaa2_dev->object_id);
3729
3730         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3731         if (cryptodev == NULL)
3732                 return -ENOMEM;
3733
3734         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3735                 cryptodev->data->dev_private = rte_zmalloc_socket(
3736                                         "cryptodev private structure",
3737                                         sizeof(struct dpaa2_sec_dev_private),
3738                                         RTE_CACHE_LINE_SIZE,
3739                                         rte_socket_id());
3740
3741                 if (cryptodev->data->dev_private == NULL)
3742                         rte_panic("Cannot allocate memzone for private "
3743                                   "device data");
3744         }
3745
3746         dpaa2_dev->cryptodev = cryptodev;
3747         cryptodev->device = &dpaa2_dev->device;
3748
3749         /* init user callbacks */
3750         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3751
3752         if (dpaa2_svr_family == SVR_LX2160A)
3753                 rta_set_sec_era(RTA_SEC_ERA_10);
3754
3755         DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
3756
3757         /* Invoke PMD device initialization function */
3758         retval = dpaa2_sec_dev_init(cryptodev);
3759         if (retval == 0)
3760                 return 0;
3761
3762         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3763                 rte_free(cryptodev->data->dev_private);
3764
3765         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3766
3767         return -ENXIO;
3768 }
3769
3770 static int
3771 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3772 {
3773         struct rte_cryptodev *cryptodev;
3774         int ret;
3775
3776         cryptodev = dpaa2_dev->cryptodev;
3777         if (cryptodev == NULL)
3778                 return -ENODEV;
3779
3780         ret = dpaa2_sec_uninit(cryptodev);
3781         if (ret)
3782                 return ret;
3783
3784         return rte_cryptodev_pmd_destroy(cryptodev);
3785 }
3786
/* Bus driver descriptor: registered with the fslmc bus so that probe()/
 * remove() are invoked for each DPAA2 SEC (DPSECI) object discovered.
 * RTE_DPAA2_DRV_IOVA_AS_VA flags that the driver treats IOVA as equal to
 * the virtual address.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

/* Container used by the cryptodev layer to track this driver and the
 * id it allocates into cryptodev_driver_id.
 */
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the DPAA2 bus and obtain a cryptodev driver id. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3802
3803 RTE_INIT(dpaa2_sec_init_log)
3804 {
3805         /* Bus level logs */
3806         dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3807         if (dpaa2_logtype_sec >= 0)
3808                 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3809 }