ddf3e47c6af11705e9ef5ce7d83b584695d7de7c
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <rte_cryptodev.h>
13 #include <rte_security_driver.h>
14 #include <rte_malloc.h>
15 #include <rte_memcpy.h>
16 #include <rte_string_fns.h>
17 #include <rte_cycles.h>
18 #include <rte_kvargs.h>
19 #include <rte_dev.h>
20 #include <rte_cryptodev_pmd.h>
21 #include <rte_common.h>
22 #include <rte_fslmc.h>
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_dpio.h>
26 #include <dpaa2_hw_mempool.h>
27 #include <fsl_dpseci.h>
28 #include <fsl_mc_sys.h>
29
30 #include "dpaa2_sec_priv.h"
31 #include "dpaa2_sec_logs.h"
32
33 /* Required types */
34 typedef uint64_t        dma_addr_t;
35
36 /* RTA header files */
37 #include <hw/desc/ipsec.h>
38 #include <hw/desc/algo.h>
39
40 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
41  * a pointer to the shared descriptor
42  */
43 #define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
44 #define FSL_VENDOR_ID           0x1957
45 #define FSL_DEVICE_ID           0x410
46 #define FSL_SUBSYSTEM_SEC       1
47 #define FSL_MC_DPSECI_DEVID     3
48
49 #define NO_PREFETCH 0
50 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
51 #define FLE_POOL_NUM_BUFS       32000
52 #define FLE_POOL_BUF_SIZE       256
53 #define FLE_POOL_CACHE_SIZE     512
54 #define FLE_SG_MEM_SIZE         2048
55 #define SEC_FLC_DHR_OUTBOUND    -114
56 #define SEC_FLC_DHR_INBOUND     0
57
58 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
59
60 static uint8_t cryptodev_driver_id;
61
62 int dpaa2_logtype_sec;
63
/* Build a compound (frame-list) FD for a security-protocol offload op
 * that has a distinct destination mbuf (out-of-place operation).
 *
 * FLE layout from the pool buffer:
 *   fle[0] - bookkeeping only: stashes the rte_crypto_op pointer and the
 *            session ctxt so they can be recovered on the dequeue path
 *   fle[1] - output FLE, pointing at the dst mbuf
 *   fle[2] - input FLE, pointing at the src mbuf
 *
 * Returns 0 on success, -1 if the FLE mempool is exhausted.
 */
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed");
		return -1;
	}
	/* Pool buffers may be recycled dirty; clear before building FLEs */
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	/* A valid buffer-pool id lets hardware free the buffers; otherwise
	 * mark "invalid pool" so software retains ownership.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* Configure Output FLE with dst mbuf data; full buf_len is given
	 * so the protocol engine can grow the frame (e.g. IPsec encap).
	 */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	return 0;

}
124
/* Build a simple (single-buffer) FD for a security-protocol offload op.
 * Out-of-place ops (m_dst set) are delegated to the compound-FD builder;
 * in-place ops use the src mbuf directly as the frame.
 *
 * Returns 0 on success, or the compound builder's error code.
 */
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* HACK: park the mbuf's real IOVA in the (unused-for-proto)
	 * digest.phys_addr field and overwrite buf_iova with the op
	 * pointer so the op can be recovered on dequeue.
	 * NOTE(review): both fields are presumably restored on the
	 * dequeue path — confirm against the matching dequeue code.
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
157
158 static inline int
159 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
160                  struct rte_crypto_op *op,
161                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
162 {
163         struct rte_crypto_sym_op *sym_op = op->sym;
164         struct ctxt_priv *priv = sess->ctxt;
165         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
166         struct sec_flow_context *flc;
167         uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
168         int icv_len = sess->digest_length;
169         uint8_t *old_icv;
170         struct rte_mbuf *mbuf;
171         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
172                         sess->iv.offset);
173
174         PMD_INIT_FUNC_TRACE();
175
176         if (sym_op->m_dst)
177                 mbuf = sym_op->m_dst;
178         else
179                 mbuf = sym_op->m_src;
180
181         /* first FLE entry used to store mbuf and session ctxt */
182         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
183                         RTE_CACHE_LINE_SIZE);
184         if (unlikely(!fle)) {
185                 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
186                 return -1;
187         }
188         memset(fle, 0, FLE_SG_MEM_SIZE);
189         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
190         DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
191
192         op_fle = fle + 1;
193         ip_fle = fle + 2;
194         sge = fle + 3;
195
196         /* Save the shared descriptor */
197         flc = &priv->flc_desc[0].flc;
198
199         /* Configure FD as a FRAME LIST */
200         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
201         DPAA2_SET_FD_COMPOUND_FMT(fd);
202         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
203
204         DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
205                    "iv-len=%d data_off: 0x%x\n",
206                    sym_op->aead.data.offset,
207                    sym_op->aead.data.length,
208                    sess->digest_length,
209                    sess->iv.length,
210                    sym_op->m_src->data_off);
211
212         /* Configure Output FLE with Scatter/Gather Entry */
213         DPAA2_SET_FLE_SG_EXT(op_fle);
214         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
215
216         if (auth_only_len)
217                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
218
219         op_fle->length = (sess->dir == DIR_ENC) ?
220                         (sym_op->aead.data.length + icv_len + auth_only_len) :
221                         sym_op->aead.data.length + auth_only_len;
222
223         /* Configure Output SGE for Encap/Decap */
224         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
225         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
226                                                                 auth_only_len);
227         sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
228
229         mbuf = mbuf->next;
230         /* o/p segs */
231         while (mbuf) {
232                 sge++;
233                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
234                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
235                 sge->length = mbuf->data_len;
236                 mbuf = mbuf->next;
237         }
238         sge->length -= icv_len;
239
240         if (sess->dir == DIR_ENC) {
241                 sge++;
242                 DPAA2_SET_FLE_ADDR(sge,
243                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
244                 sge->length = icv_len;
245         }
246         DPAA2_SET_FLE_FIN(sge);
247
248         sge++;
249         mbuf = sym_op->m_src;
250
251         /* Configure Input FLE with Scatter/Gather Entry */
252         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
253         DPAA2_SET_FLE_SG_EXT(ip_fle);
254         DPAA2_SET_FLE_FIN(ip_fle);
255         ip_fle->length = (sess->dir == DIR_ENC) ?
256                 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
257                 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
258                  icv_len);
259
260         /* Configure Input SGE for Encap/Decap */
261         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
262         sge->length = sess->iv.length;
263
264         sge++;
265         if (auth_only_len) {
266                 DPAA2_SET_FLE_ADDR(sge,
267                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
268                 sge->length = auth_only_len;
269                 sge++;
270         }
271
272         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
273         DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
274                                 mbuf->data_off);
275         sge->length = mbuf->data_len - sym_op->aead.data.offset;
276
277         mbuf = mbuf->next;
278         /* i/p segs */
279         while (mbuf) {
280                 sge++;
281                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
282                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
283                 sge->length = mbuf->data_len;
284                 mbuf = mbuf->next;
285         }
286
287         if (sess->dir == DIR_DEC) {
288                 sge++;
289                 old_icv = (uint8_t *)(sge + 1);
290                 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
291                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
292                 sge->length = icv_len;
293         }
294
295         DPAA2_SET_FLE_FIN(sge);
296         if (auth_only_len) {
297                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
298                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
299         }
300         DPAA2_SET_FD_LEN(fd, ip_fle->length);
301
302         return 0;
303 }
304
/* Build a compound FD for an AEAD (GCM) op on single-segment mbufs.
 *
 * FLE layout from the pool buffer:
 *   fle[0]  - bookkeeping: op pointer + session ctxt for dequeue
 *   fle[1]  - output FLE, fle[2] - input FLE (both SG-extended)
 *   fle[3+] - SG entries: output = payload (+ digest for encrypt);
 *             input = IV, optional AAD, payload (+ old ICV for decrypt).
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	/* Pool buffers may be recycled dirty; clear before building FLEs */
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* fle now points at the output FLE; sge at the first SG entry */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output additionally carries the generated ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally carries the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV to scratch space past the SG table
		 * so hardware can read it contiguously
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
452
453 static inline int
454 build_authenc_sg_fd(dpaa2_sec_session *sess,
455                  struct rte_crypto_op *op,
456                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
457 {
458         struct rte_crypto_sym_op *sym_op = op->sym;
459         struct ctxt_priv *priv = sess->ctxt;
460         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
461         struct sec_flow_context *flc;
462         uint32_t auth_only_len = sym_op->auth.data.length -
463                                 sym_op->cipher.data.length;
464         int icv_len = sess->digest_length;
465         uint8_t *old_icv;
466         struct rte_mbuf *mbuf;
467         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
468                         sess->iv.offset);
469
470         PMD_INIT_FUNC_TRACE();
471
472         if (sym_op->m_dst)
473                 mbuf = sym_op->m_dst;
474         else
475                 mbuf = sym_op->m_src;
476
477         /* first FLE entry used to store mbuf and session ctxt */
478         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
479                         RTE_CACHE_LINE_SIZE);
480         if (unlikely(!fle)) {
481                 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
482                 return -1;
483         }
484         memset(fle, 0, FLE_SG_MEM_SIZE);
485         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
486         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
487
488         op_fle = fle + 1;
489         ip_fle = fle + 2;
490         sge = fle + 3;
491
492         /* Save the shared descriptor */
493         flc = &priv->flc_desc[0].flc;
494
495         /* Configure FD as a FRAME LIST */
496         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
497         DPAA2_SET_FD_COMPOUND_FMT(fd);
498         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
499
500         DPAA2_SEC_DP_DEBUG(
501                 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
502                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
503                 sym_op->auth.data.offset,
504                 sym_op->auth.data.length,
505                 sess->digest_length,
506                 sym_op->cipher.data.offset,
507                 sym_op->cipher.data.length,
508                 sess->iv.length,
509                 sym_op->m_src->data_off);
510
511         /* Configure Output FLE with Scatter/Gather Entry */
512         DPAA2_SET_FLE_SG_EXT(op_fle);
513         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
514
515         if (auth_only_len)
516                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
517
518         op_fle->length = (sess->dir == DIR_ENC) ?
519                         (sym_op->cipher.data.length + icv_len) :
520                         sym_op->cipher.data.length;
521
522         /* Configure Output SGE for Encap/Decap */
523         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
524         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
525         sge->length = mbuf->data_len - sym_op->auth.data.offset;
526
527         mbuf = mbuf->next;
528         /* o/p segs */
529         while (mbuf) {
530                 sge++;
531                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
532                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
533                 sge->length = mbuf->data_len;
534                 mbuf = mbuf->next;
535         }
536         sge->length -= icv_len;
537
538         if (sess->dir == DIR_ENC) {
539                 sge++;
540                 DPAA2_SET_FLE_ADDR(sge,
541                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
542                 sge->length = icv_len;
543         }
544         DPAA2_SET_FLE_FIN(sge);
545
546         sge++;
547         mbuf = sym_op->m_src;
548
549         /* Configure Input FLE with Scatter/Gather Entry */
550         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
551         DPAA2_SET_FLE_SG_EXT(ip_fle);
552         DPAA2_SET_FLE_FIN(ip_fle);
553         ip_fle->length = (sess->dir == DIR_ENC) ?
554                         (sym_op->auth.data.length + sess->iv.length) :
555                         (sym_op->auth.data.length + sess->iv.length +
556                          icv_len);
557
558         /* Configure Input SGE for Encap/Decap */
559         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
560         sge->length = sess->iv.length;
561
562         sge++;
563         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
564         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
565                                 mbuf->data_off);
566         sge->length = mbuf->data_len - sym_op->auth.data.offset;
567
568         mbuf = mbuf->next;
569         /* i/p segs */
570         while (mbuf) {
571                 sge++;
572                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
573                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
574                 sge->length = mbuf->data_len;
575                 mbuf = mbuf->next;
576         }
577         sge->length -= icv_len;
578
579         if (sess->dir == DIR_DEC) {
580                 sge++;
581                 old_icv = (uint8_t *)(sge + 1);
582                 memcpy(old_icv, sym_op->auth.digest.data,
583                        icv_len);
584                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
585                 sge->length = icv_len;
586         }
587
588         DPAA2_SET_FLE_FIN(sge);
589         if (auth_only_len) {
590                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
591                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
592         }
593         DPAA2_SET_FD_LEN(fd, ip_fle->length);
594
595         return 0;
596 }
597
/* Build a compound FD for a chained cipher+auth op on single-segment
 * mbufs.
 *
 * FLE layout from the pool buffer:
 *   fle[0]  - bookkeeping: op pointer + session ctxt for dequeue
 *   fle[1]  - output FLE, fle[2] - input FLE (both SG-extended)
 *   fle[3+] - SG entries: output = ciphered payload (+ ICV for encrypt);
 *             input = IV, auth range (+ old ICV for decrypt).
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	/* bytes that are authenticated but not ciphered */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	/* Pool buffers may be recycled dirty; clear before building FLEs */
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* fle now points at the output FLE; sge at the first SG entry */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output additionally carries the generated ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally carries the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV to scratch space past the SG table
		 * so hardware can read it contiguously
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
739
740 static inline int build_auth_sg_fd(
741                 dpaa2_sec_session *sess,
742                 struct rte_crypto_op *op,
743                 struct qbman_fd *fd,
744                 __rte_unused uint16_t bpid)
745 {
746         struct rte_crypto_sym_op *sym_op = op->sym;
747         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
748         struct sec_flow_context *flc;
749         struct ctxt_priv *priv = sess->ctxt;
750         uint8_t *old_digest;
751         struct rte_mbuf *mbuf;
752
753         PMD_INIT_FUNC_TRACE();
754
755         mbuf = sym_op->m_src;
756         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
757                         RTE_CACHE_LINE_SIZE);
758         if (unlikely(!fle)) {
759                 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
760                 return -1;
761         }
762         memset(fle, 0, FLE_SG_MEM_SIZE);
763         /* first FLE entry used to store mbuf and session ctxt */
764         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
765         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
766         op_fle = fle + 1;
767         ip_fle = fle + 2;
768         sge = fle + 3;
769
770         flc = &priv->flc_desc[DESC_INITFINAL].flc;
771         /* sg FD */
772         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
773         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
774         DPAA2_SET_FD_COMPOUND_FMT(fd);
775
776         /* o/p fle */
777         DPAA2_SET_FLE_ADDR(op_fle,
778                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
779         op_fle->length = sess->digest_length;
780
781         /* i/p fle */
782         DPAA2_SET_FLE_SG_EXT(ip_fle);
783         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
784         /* i/p 1st seg */
785         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
786         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
787         sge->length = mbuf->data_len - sym_op->auth.data.offset;
788
789         /* i/p segs */
790         mbuf = mbuf->next;
791         while (mbuf) {
792                 sge++;
793                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
794                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
795                 sge->length = mbuf->data_len;
796                 mbuf = mbuf->next;
797         }
798         if (sess->dir == DIR_ENC) {
799                 /* Digest calculation case */
800                 sge->length -= sess->digest_length;
801                 ip_fle->length = sym_op->auth.data.length;
802         } else {
803                 /* Digest verification case */
804                 sge++;
805                 old_digest = (uint8_t *)(sge + 1);
806                 rte_memcpy(old_digest, sym_op->auth.digest.data,
807                            sess->digest_length);
808                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
809                 sge->length = sess->digest_length;
810                 ip_fle->length = sym_op->auth.data.length +
811                                 sess->digest_length;
812         }
813         DPAA2_SET_FLE_FIN(sge);
814         DPAA2_SET_FLE_FIN(ip_fle);
815         DPAA2_SET_FD_LEN(fd, ip_fle->length);
816
817         return 0;
818 }
819
/*
 * Build a compound frame descriptor for a plain authentication (hash/HMAC)
 * operation on a contiguous mbuf.
 *
 * Layout: one hidden leading FLE stashes the rte_crypto_op pointer and the
 * session context (recovered later in sec_fd_to_mbuf()); it is followed by
 * the output FLE (digest buffer) and the input FLE (data to authenticate).
 *
 * Returns 0 on success, -1 if no FLE buffer could be taken from the pool.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	/* Tag the FD and FLEs with the buffer pool id, or mark them as
	 * invalid-pool (IVP) when the mbuf pool is not hardware backed.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Output FLE: where SEC writes the computed digest */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Digest generation: input is just the auth data range */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification: input becomes a 2-entry SG list,
		 * the auth data range followed by a copy of the expected
		 * digest for SEC to compare against.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		/* Stash the caller-provided digest just past the SG entries
		 * (inside the same FLE pool buffer).
		 */
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
909
/*
 * Build a compound frame descriptor for a cipher-only operation whose
 * source (and possibly destination) mbuf is segmented (scatter-gather).
 *
 * A scratch area is allocated with rte_malloc() to hold: one hidden FLE
 * stashing the op and session context, the output FLE, the input FLE, and
 * the SG entry lists for both directions. sec_fd_to_mbuf() frees it with
 * rte_free() on completion (the SG path, unlike the contiguous path, does
 * not use the FLE mempool).
 *
 * Input to SEC is IV followed by the cipher data range of m_src; output is
 * written to m_dst if set, else in place to m_src.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	/* IV lives in the per-op private area at the configured offset */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is provided, in-place otherwise */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg: skip cipher.data.offset bytes of the first segment */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs: remaining segments are used in full */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: the input SG list starts right after the output one */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1028
/*
 * Build a compound frame descriptor for a cipher-only operation on a
 * contiguous mbuf.
 *
 * FLE buffer comes from the session's FLE mempool; the first (hidden) FLE
 * stashes the op and session context for recovery in sec_fd_to_mbuf().
 * Input to SEC is IV followed by the cipher data range of m_src; output
 * goes to m_dst if set, else in place to m_src.
 *
 * Returns 0 on success, -1 if no FLE buffer is available.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	/* IV lives in the per-op private area at the configured offset */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is provided, in-place otherwise */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag the FD/FLEs with the buffer pool id, or mark invalid-pool */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: ciphertext/plaintext destination */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: 2-entry SG list = IV then source data */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1136
1137 static inline int
1138 build_sec_fd(struct rte_crypto_op *op,
1139              struct qbman_fd *fd, uint16_t bpid)
1140 {
1141         int ret = -1;
1142         dpaa2_sec_session *sess;
1143
1144         PMD_INIT_FUNC_TRACE();
1145
1146         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1147                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1148                                 op->sym->session, cryptodev_driver_id);
1149         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1150                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1151                                 op->sym->sec_session);
1152         else
1153                 return -1;
1154
1155         /* Segmented buffer */
1156         if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1157                 switch (sess->ctxt_type) {
1158                 case DPAA2_SEC_CIPHER:
1159                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1160                         break;
1161                 case DPAA2_SEC_AUTH:
1162                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1163                         break;
1164                 case DPAA2_SEC_AEAD:
1165                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1166                         break;
1167                 case DPAA2_SEC_CIPHER_HASH:
1168                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1169                         break;
1170                 case DPAA2_SEC_HASH_CIPHER:
1171                 default:
1172                         DPAA2_SEC_ERR("error: Unsupported session");
1173                 }
1174         } else {
1175                 switch (sess->ctxt_type) {
1176                 case DPAA2_SEC_CIPHER:
1177                         ret = build_cipher_fd(sess, op, fd, bpid);
1178                         break;
1179                 case DPAA2_SEC_AUTH:
1180                         ret = build_auth_fd(sess, op, fd, bpid);
1181                         break;
1182                 case DPAA2_SEC_AEAD:
1183                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1184                         break;
1185                 case DPAA2_SEC_CIPHER_HASH:
1186                         ret = build_authenc_fd(sess, op, fd, bpid);
1187                         break;
1188                 case DPAA2_SEC_IPSEC:
1189                         ret = build_proto_fd(sess, op, fd, bpid);
1190                         break;
1191                 case DPAA2_SEC_HASH_CIPHER:
1192                 default:
1193                         DPAA2_SEC_ERR("error: Unsupported session");
1194                 }
1195         }
1196         return ret;
1197 }
1198
/*
 * Burst-enqueue crypto ops to the SEC hardware via QBMAN.
 *
 * Ops are converted to frame descriptors in batches of up to
 * MAX_TX_RING_SLOTS and pushed through the per-lcore software portal.
 * Sessionless ops are rejected up front. Returns the number of ops
 * actually enqueued.
 */
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/*todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Only the first op is checked; the whole burst is assumed to share
	 * the same session type.
	 */
	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	/* Lazily affine a QBMAN software portal to this lcore */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		/* (nb_ops >> 3) != 0 means nb_ops >= 8 == MAX_TX_RING_SLOTS */
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				/* NOTE(review): bailing out here also drops
				 * the FDs already built in this batch without
				 * enqueuing them — verify this is intended.
				 */
				goto skip_tx;
			}
			ops++;
		}
		/* Retry until the portal accepts the whole batch */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							NULL,
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	/* Remaining (unsent) ops are accounted as errors */
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
1270
/*
 * Recover the crypto op from a single-buffer (non-compound) FD, as produced
 * by the protocol-offload (IPsec) enqueue path.
 *
 * The enqueue side stashed the op pointer in mbuf->buf_iova and parked the
 * real buf_iova in aead.digest.phys_addr; this undoes that swap. The mbuf
 * data offset is then adjusted by the SEC data-head-room delta, and
 * pkt_len/data_len are grown to the FD length (protocol processing may have
 * changed the packet size).
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	/* The FD address points at the buffer; step back over the pool
	 * metadata to the inline mbuf header.
	 */
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;
	/* diff underflows harmlessly to the right delta in uint16 math */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}
1299
/*
 * Recover the crypto op from a completed frame descriptor.
 *
 * Single-format FDs (protocol offload) are delegated to
 * sec_simple_fd_to_mbuf(). For compound FDs, the op pointer and session
 * context are read back from the hidden FLE placed one entry before the
 * FD address by the build_*_fd() functions, and the FLE memory is returned
 * to its pool (contiguous path) or freed (SG path, rte_malloc'ed).
 *
 * Returns the op, or NULL for non-inline (IVP) buffers, which are not yet
 * supported.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* For protocol (IPsec) sessions the output length is only known
	 * from the FD, so propagate it to the mbuf here.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: mempool buffer for the contiguous path,
	 * rte_malloc'ed scratch for the SG path
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1369
/*
 * Burst-dequeue completed crypto ops from the SEC hardware.
 *
 * Issues one volatile dequeue (pull) command for up to nb_ops frames
 * (capped at DPAA2_DQRR_RING_SIZE), then drains the dequeue storage until
 * QBMAN reports the pull complete. Returns the number of ops delivered.
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	/* Lazily affine a QBMAN software portal to this lcore */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		/* NOTE(review): sec_fd_to_mbuf() can return NULL for
		 * non-inline buffers; the status write below would then
		 * dereference NULL — confirm that path cannot occur here.
		 */
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1468
1469 /** Release queue pair */
1470 static int
1471 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1472 {
1473         struct dpaa2_sec_qp *qp =
1474                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1475
1476         PMD_INIT_FUNC_TRACE();
1477
1478         if (qp->rx_vq.q_storage) {
1479                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1480                 rte_free(qp->rx_vq.q_storage);
1481         }
1482         rte_free(qp);
1483
1484         dev->data->queue_pairs[queue_pair_id] = NULL;
1485
1486         return 0;
1487 }
1488
1489 /** Setup a queue pair */
1490 static int
1491 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1492                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1493                 __rte_unused int socket_id,
1494                 __rte_unused struct rte_mempool *session_pool)
1495 {
1496         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1497         struct dpaa2_sec_qp *qp;
1498         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1499         struct dpseci_rx_queue_cfg cfg;
1500         int32_t retcode;
1501
1502         PMD_INIT_FUNC_TRACE();
1503
1504         /* If qp is already in use free ring memory and qp metadata. */
1505         if (dev->data->queue_pairs[qp_id] != NULL) {
1506                 DPAA2_SEC_INFO("QP already setup");
1507                 return 0;
1508         }
1509
1510         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1511                     dev, qp_id, qp_conf);
1512
1513         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1514
1515         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1516                         RTE_CACHE_LINE_SIZE);
1517         if (!qp) {
1518                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1519                 return -1;
1520         }
1521
1522         qp->rx_vq.dev = dev;
1523         qp->tx_vq.dev = dev;
1524         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1525                 sizeof(struct queue_storage_info_t),
1526                 RTE_CACHE_LINE_SIZE);
1527         if (!qp->rx_vq.q_storage) {
1528                 DPAA2_SEC_ERR("malloc failed for q_storage");
1529                 return -1;
1530         }
1531         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1532
1533         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1534                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1535                 return -1;
1536         }
1537
1538         dev->data->queue_pairs[qp_id] = qp;
1539
1540         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1541         cfg.user_ctx = (size_t)(&qp->rx_vq);
1542         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1543                                       qp_id, &cfg);
1544         return retcode;
1545 }
1546
1547 /** Return the number of allocated queue pairs */
1548 static uint32_t
1549 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1550 {
1551         PMD_INIT_FUNC_TRACE();
1552
1553         return dev->data->nb_queue_pairs;
1554 }
1555
/** Returns the size of the dpaa2_sec session structure
 * (the original comment said "aesni gcm" — a copy-paste from another PMD)
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
1564
/*
 * dpaa2_sec_cipher_init() - configure a cipher-only session.
 *
 * Allocates the per-session flow-context/descriptor memory, copies the
 * cipher key into the session, maps the rte_crypto cipher algorithm to
 * the CAAM algorithm selector/mode, and builds the shared block-cipher
 * descriptor through the RTA library (cnstr_shdsc_blkcipher).  The flow
 * context is pointed at queue pair 0's rx_vq so the SEC block can
 * deliver responses there.
 *
 * Returns 0 on success, -1 on allocation or descriptor-build failure
 * (all partially allocated resources are freed on the error path).
 */
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	/* NOTE(review): no length>0 escape here, so a zero-length key
	 * whose rte_zmalloc() returns NULL is treated as an allocation
	 * failure — presumably cipher keys are always non-empty; confirm.
	 */
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	/* Key is embedded immediately in the descriptor (RTA_DATA_IMM). */
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	/* Map the rte_crypto algorithm onto CAAM selector + AAI mode. */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	/* Algorithms recognized by the API but not wired up here. */
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Build the shared descriptor; returns its length in words. */
	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	/* Point the flow context at qp 0's rx queue for SEC responses. */
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
1678
1679 static int
1680 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1681                     struct rte_crypto_sym_xform *xform,
1682                     dpaa2_sec_session *session)
1683 {
1684         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1685         struct alginfo authdata;
1686         int bufsize, i;
1687         struct ctxt_priv *priv;
1688         struct sec_flow_context *flc;
1689
1690         PMD_INIT_FUNC_TRACE();
1691
1692         /* For SEC AUTH three descriptors are required for various stages */
1693         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1694                         sizeof(struct ctxt_priv) + 3 *
1695                         sizeof(struct sec_flc_desc),
1696                         RTE_CACHE_LINE_SIZE);
1697         if (priv == NULL) {
1698                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1699                 return -1;
1700         }
1701
1702         priv->fle_pool = dev_priv->fle_pool;
1703         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1704
1705         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1706                         RTE_CACHE_LINE_SIZE);
1707         if (session->auth_key.data == NULL) {
1708                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1709                 rte_free(priv);
1710                 return -1;
1711         }
1712         session->auth_key.length = xform->auth.key.length;
1713
1714         memcpy(session->auth_key.data, xform->auth.key.data,
1715                xform->auth.key.length);
1716         authdata.key = (size_t)session->auth_key.data;
1717         authdata.keylen = session->auth_key.length;
1718         authdata.key_enc_flags = 0;
1719         authdata.key_type = RTA_DATA_IMM;
1720
1721         session->digest_length = xform->auth.digest_length;
1722
1723         switch (xform->auth.algo) {
1724         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1725                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1726                 authdata.algmode = OP_ALG_AAI_HMAC;
1727                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1728                 break;
1729         case RTE_CRYPTO_AUTH_MD5_HMAC:
1730                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1731                 authdata.algmode = OP_ALG_AAI_HMAC;
1732                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1733                 break;
1734         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1735                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1736                 authdata.algmode = OP_ALG_AAI_HMAC;
1737                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1738                 break;
1739         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1740                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1741                 authdata.algmode = OP_ALG_AAI_HMAC;
1742                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1743                 break;
1744         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1745                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1746                 authdata.algmode = OP_ALG_AAI_HMAC;
1747                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1748                 break;
1749         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1750                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1751                 authdata.algmode = OP_ALG_AAI_HMAC;
1752                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1753                 break;
1754         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1755         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1756         case RTE_CRYPTO_AUTH_NULL:
1757         case RTE_CRYPTO_AUTH_SHA1:
1758         case RTE_CRYPTO_AUTH_SHA256:
1759         case RTE_CRYPTO_AUTH_SHA512:
1760         case RTE_CRYPTO_AUTH_SHA224:
1761         case RTE_CRYPTO_AUTH_SHA384:
1762         case RTE_CRYPTO_AUTH_MD5:
1763         case RTE_CRYPTO_AUTH_AES_GMAC:
1764         case RTE_CRYPTO_AUTH_KASUMI_F9:
1765         case RTE_CRYPTO_AUTH_AES_CMAC:
1766         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1767         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1768                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1769                               xform->auth.algo);
1770                 goto error_out;
1771         default:
1772                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1773                               xform->auth.algo);
1774                 goto error_out;
1775         }
1776         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1777                                 DIR_ENC : DIR_DEC;
1778
1779         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1780                                    1, 0, &authdata, !session->dir,
1781                                    session->digest_length);
1782         if (bufsize < 0) {
1783                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1784                 goto error_out;
1785         }
1786
1787         flc->word1_sdl = (uint8_t)bufsize;
1788         flc->word2_rflc_31_0 = lower_32_bits(
1789                         (size_t)&(((struct dpaa2_sec_qp *)
1790                         dev->data->queue_pairs[0])->rx_vq));
1791         flc->word3_rflc_63_32 = upper_32_bits(
1792                         (size_t)&(((struct dpaa2_sec_qp *)
1793                         dev->data->queue_pairs[0])->rx_vq));
1794         session->ctxt = priv;
1795         for (i = 0; i < bufsize; i++)
1796                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1797                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1798
1799
1800         return 0;
1801
1802 error_out:
1803         rte_free(session->auth_key.data);
1804         rte_free(priv);
1805         return -1;
1806 }
1807
1808 static int
1809 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1810                     struct rte_crypto_sym_xform *xform,
1811                     dpaa2_sec_session *session)
1812 {
1813         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1814         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1815         struct alginfo aeaddata;
1816         int bufsize, i;
1817         struct ctxt_priv *priv;
1818         struct sec_flow_context *flc;
1819         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1820         int err;
1821
1822         PMD_INIT_FUNC_TRACE();
1823
1824         /* Set IV parameters */
1825         session->iv.offset = aead_xform->iv.offset;
1826         session->iv.length = aead_xform->iv.length;
1827         session->ctxt_type = DPAA2_SEC_AEAD;
1828
1829         /* For SEC AEAD only one descriptor is required */
1830         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1831                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1832                         RTE_CACHE_LINE_SIZE);
1833         if (priv == NULL) {
1834                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1835                 return -1;
1836         }
1837
1838         priv->fle_pool = dev_priv->fle_pool;
1839         flc = &priv->flc_desc[0].flc;
1840
1841         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1842                                                RTE_CACHE_LINE_SIZE);
1843         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1844                 DPAA2_SEC_ERR("No Memory for aead key");
1845                 rte_free(priv);
1846                 return -1;
1847         }
1848         memcpy(session->aead_key.data, aead_xform->key.data,
1849                aead_xform->key.length);
1850
1851         session->digest_length = aead_xform->digest_length;
1852         session->aead_key.length = aead_xform->key.length;
1853         ctxt->auth_only_len = aead_xform->aad_length;
1854
1855         aeaddata.key = (size_t)session->aead_key.data;
1856         aeaddata.keylen = session->aead_key.length;
1857         aeaddata.key_enc_flags = 0;
1858         aeaddata.key_type = RTA_DATA_IMM;
1859
1860         switch (aead_xform->algo) {
1861         case RTE_CRYPTO_AEAD_AES_GCM:
1862                 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1863                 aeaddata.algmode = OP_ALG_AAI_GCM;
1864                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1865                 break;
1866         case RTE_CRYPTO_AEAD_AES_CCM:
1867                 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1868                               aead_xform->algo);
1869                 goto error_out;
1870         default:
1871                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1872                               aead_xform->algo);
1873                 goto error_out;
1874         }
1875         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1876                                 DIR_ENC : DIR_DEC;
1877
1878         priv->flc_desc[0].desc[0] = aeaddata.keylen;
1879         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1880                                MIN_JOB_DESC_SIZE,
1881                                (unsigned int *)priv->flc_desc[0].desc,
1882                                &priv->flc_desc[0].desc[1], 1);
1883
1884         if (err < 0) {
1885                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1886                 goto error_out;
1887         }
1888         if (priv->flc_desc[0].desc[1] & 1) {
1889                 aeaddata.key_type = RTA_DATA_IMM;
1890         } else {
1891                 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1892                 aeaddata.key_type = RTA_DATA_PTR;
1893         }
1894         priv->flc_desc[0].desc[0] = 0;
1895         priv->flc_desc[0].desc[1] = 0;
1896
1897         if (session->dir == DIR_ENC)
1898                 bufsize = cnstr_shdsc_gcm_encap(
1899                                 priv->flc_desc[0].desc, 1, 0,
1900                                 &aeaddata, session->iv.length,
1901                                 session->digest_length);
1902         else
1903                 bufsize = cnstr_shdsc_gcm_decap(
1904                                 priv->flc_desc[0].desc, 1, 0,
1905                                 &aeaddata, session->iv.length,
1906                                 session->digest_length);
1907         if (bufsize < 0) {
1908                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1909                 goto error_out;
1910         }
1911
1912         flc->word1_sdl = (uint8_t)bufsize;
1913         flc->word2_rflc_31_0 = lower_32_bits(
1914                         (size_t)&(((struct dpaa2_sec_qp *)
1915                         dev->data->queue_pairs[0])->rx_vq));
1916         flc->word3_rflc_63_32 = upper_32_bits(
1917                         (size_t)&(((struct dpaa2_sec_qp *)
1918                         dev->data->queue_pairs[0])->rx_vq));
1919         session->ctxt = priv;
1920         for (i = 0; i < bufsize; i++)
1921                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1922                             i, priv->flc_desc[0].desc[i]);
1923
1924         return 0;
1925
1926 error_out:
1927         rte_free(session->aead_key.data);
1928         rte_free(priv);
1929         return -1;
1930 }
1931
1932
/*
 * dpaa2_sec_aead_chain_init() - configure a chained cipher+auth
 * session (authenc, e.g. AES-CBC + HMAC-SHA).
 *
 * The caller sets session->ext_params.aead_ctxt.auth_cipher_text
 * before calling: true means the chain is cipher-first
 * (xform = cipher, xform->next = auth); false means auth-first.
 * Both keys are copied into the session, the CAAM selectors for the
 * auth and cipher halves are chosen, rta_inline_query() decides for
 * each key whether it is embedded in the descriptor or referenced by
 * IOVA pointer, and a combined authenc shared descriptor is built.
 * Only the cipher-then-hash direction is supported by the descriptor
 * builder; hash-before-cipher is rejected.
 *
 * Returns 0 on success, -1 on allocation, key-length,
 * unsupported-algorithm or descriptor-build failure (all partially
 * allocated resources are freed on the error path).
 */
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Resolve which xform in the chain is the cipher and which is
	 * the auth, and derive the context type from the chain order
	 * combined with the cipher operation direction.
	 */
	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	/* NOTE(review): a zero-length key leaves the destination NULL,
	 * making these memcpy() calls UB even with length 0 — presumably
	 * chained sessions always carry non-empty keys; verify callers.
	 */
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	/* Map the auth half onto CAAM selector + HMAC mode. */
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	/* Auth algorithms not supported in a chained session. */
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Map the cipher half onto CAAM selector + AAI mode. */
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	/* Cipher algorithms not supported in a chained session. */
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Ask RTA whether each key fits inline in the descriptor; the
	 * answer comes back as a bitmask in desc[2]
	 * (bit 0 = cipher key, bit 1 = auth key).
	 */
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	/* Scratch words consumed by the query; clear before build. */
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
		if (bufsize < 0) {
			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
			goto error_out;
		}
	} else {
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	/* Point the flow context at qp 0's rx queue for SEC responses. */
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
2162
2163 static int
2164 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2165                             struct rte_crypto_sym_xform *xform, void *sess)
2166 {
2167         dpaa2_sec_session *session = sess;
2168
2169         PMD_INIT_FUNC_TRACE();
2170
2171         if (unlikely(sess == NULL)) {
2172                 DPAA2_SEC_ERR("Invalid session struct");
2173                 return -1;
2174         }
2175
2176         memset(session, 0, sizeof(dpaa2_sec_session));
2177         /* Default IV length = 0 */
2178         session->iv.length = 0;
2179
2180         /* Cipher Only */
2181         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2182                 session->ctxt_type = DPAA2_SEC_CIPHER;
2183                 dpaa2_sec_cipher_init(dev, xform, session);
2184
2185         /* Authentication Only */
2186         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2187                    xform->next == NULL) {
2188                 session->ctxt_type = DPAA2_SEC_AUTH;
2189                 dpaa2_sec_auth_init(dev, xform, session);
2190
2191         /* Cipher then Authenticate */
2192         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2193                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2194                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2195                 dpaa2_sec_aead_chain_init(dev, xform, session);
2196
2197         /* Authenticate then Cipher */
2198         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2199                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2200                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2201                 dpaa2_sec_aead_chain_init(dev, xform, session);
2202
2203         /* AEAD operation for AES-GCM kind of Algorithms */
2204         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2205                    xform->next == NULL) {
2206                 dpaa2_sec_aead_init(dev, xform, session);
2207
2208         } else {
2209                 DPAA2_SEC_ERR("Invalid crypto type");
2210                 return -EINVAL;
2211         }
2212
2213         return 0;
2214 }
2215
2216 static int
2217 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2218                             struct rte_security_session_conf *conf,
2219                             void *sess)
2220 {
2221         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2222         struct rte_crypto_auth_xform *auth_xform;
2223         struct rte_crypto_cipher_xform *cipher_xform;
2224         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2225         struct ctxt_priv *priv;
2226         struct ipsec_encap_pdb encap_pdb;
2227         struct ipsec_decap_pdb decap_pdb;
2228         struct alginfo authdata, cipherdata;
2229         int bufsize;
2230         struct sec_flow_context *flc;
2231         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2232
2233         PMD_INIT_FUNC_TRACE();
2234
2235         memset(session, 0, sizeof(dpaa2_sec_session));
2236         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2237                 cipher_xform = &conf->crypto_xform->cipher;
2238                 auth_xform = &conf->crypto_xform->next->auth;
2239         } else {
2240                 auth_xform = &conf->crypto_xform->auth;
2241                 cipher_xform = &conf->crypto_xform->next->cipher;
2242         }
2243         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2244                                 sizeof(struct ctxt_priv) +
2245                                 sizeof(struct sec_flc_desc),
2246                                 RTE_CACHE_LINE_SIZE);
2247
2248         if (priv == NULL) {
2249                 DPAA2_SEC_ERR("No memory for priv CTXT");
2250                 return -ENOMEM;
2251         }
2252
2253         priv->fle_pool = dev_priv->fle_pool;
2254         flc = &priv->flc_desc[0].flc;
2255
2256         session->ctxt_type = DPAA2_SEC_IPSEC;
2257         session->cipher_key.data = rte_zmalloc(NULL,
2258                                                cipher_xform->key.length,
2259                                                RTE_CACHE_LINE_SIZE);
2260         if (session->cipher_key.data == NULL &&
2261                         cipher_xform->key.length > 0) {
2262                 DPAA2_SEC_ERR("No Memory for cipher key");
2263                 rte_free(priv);
2264                 return -ENOMEM;
2265         }
2266
2267         session->cipher_key.length = cipher_xform->key.length;
2268         session->auth_key.data = rte_zmalloc(NULL,
2269                                         auth_xform->key.length,
2270                                         RTE_CACHE_LINE_SIZE);
2271         if (session->auth_key.data == NULL &&
2272                         auth_xform->key.length > 0) {
2273                 DPAA2_SEC_ERR("No Memory for auth key");
2274                 rte_free(session->cipher_key.data);
2275                 rte_free(priv);
2276                 return -ENOMEM;
2277         }
2278         session->auth_key.length = auth_xform->key.length;
2279         memcpy(session->cipher_key.data, cipher_xform->key.data,
2280                         cipher_xform->key.length);
2281         memcpy(session->auth_key.data, auth_xform->key.data,
2282                         auth_xform->key.length);
2283
2284         authdata.key = (size_t)session->auth_key.data;
2285         authdata.keylen = session->auth_key.length;
2286         authdata.key_enc_flags = 0;
2287         authdata.key_type = RTA_DATA_IMM;
2288         switch (auth_xform->algo) {
2289         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2290                 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2291                 authdata.algmode = OP_ALG_AAI_HMAC;
2292                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2293                 break;
2294         case RTE_CRYPTO_AUTH_MD5_HMAC:
2295                 authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2296                 authdata.algmode = OP_ALG_AAI_HMAC;
2297                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2298                 break;
2299         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2300                 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2301                 authdata.algmode = OP_ALG_AAI_HMAC;
2302                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2303                 break;
2304         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2305                 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2306                 authdata.algmode = OP_ALG_AAI_HMAC;
2307                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2308                 break;
2309         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2310                 authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2311                 authdata.algmode = OP_ALG_AAI_HMAC;
2312                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2313                 break;
2314         case RTE_CRYPTO_AUTH_AES_CMAC:
2315                 authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
2316                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2317                 break;
2318         case RTE_CRYPTO_AUTH_NULL:
2319                 authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
2320                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2321                 break;
2322         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2323         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2324         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2325         case RTE_CRYPTO_AUTH_SHA1:
2326         case RTE_CRYPTO_AUTH_SHA256:
2327         case RTE_CRYPTO_AUTH_SHA512:
2328         case RTE_CRYPTO_AUTH_SHA224:
2329         case RTE_CRYPTO_AUTH_SHA384:
2330         case RTE_CRYPTO_AUTH_MD5:
2331         case RTE_CRYPTO_AUTH_AES_GMAC:
2332         case RTE_CRYPTO_AUTH_KASUMI_F9:
2333         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2334         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2335                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2336                               auth_xform->algo);
2337                 goto out;
2338         default:
2339                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2340                               auth_xform->algo);
2341                 goto out;
2342         }
2343         cipherdata.key = (size_t)session->cipher_key.data;
2344         cipherdata.keylen = session->cipher_key.length;
2345         cipherdata.key_enc_flags = 0;
2346         cipherdata.key_type = RTA_DATA_IMM;
2347
2348         switch (cipher_xform->algo) {
2349         case RTE_CRYPTO_CIPHER_AES_CBC:
2350                 cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
2351                 cipherdata.algmode = OP_ALG_AAI_CBC;
2352                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2353                 break;
2354         case RTE_CRYPTO_CIPHER_3DES_CBC:
2355                 cipherdata.algtype = OP_PCL_IPSEC_3DES;
2356                 cipherdata.algmode = OP_ALG_AAI_CBC;
2357                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2358                 break;
2359         case RTE_CRYPTO_CIPHER_AES_CTR:
2360                 cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
2361                 cipherdata.algmode = OP_ALG_AAI_CTR;
2362                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2363                 break;
2364         case RTE_CRYPTO_CIPHER_NULL:
2365                 cipherdata.algtype = OP_PCL_IPSEC_NULL;
2366                 break;
2367         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2368         case RTE_CRYPTO_CIPHER_3DES_ECB:
2369         case RTE_CRYPTO_CIPHER_AES_ECB:
2370         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2371                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2372                               cipher_xform->algo);
2373                 goto out;
2374         default:
2375                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2376                               cipher_xform->algo);
2377                 goto out;
2378         }
2379
2380         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2381                 struct ip ip4_hdr;
2382
2383                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2384                 ip4_hdr.ip_v = IPVERSION;
2385                 ip4_hdr.ip_hl = 5;
2386                 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2387                 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2388                 ip4_hdr.ip_id = 0;
2389                 ip4_hdr.ip_off = 0;
2390                 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2391                 ip4_hdr.ip_p = 0x32;
2392                 ip4_hdr.ip_sum = 0;
2393                 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2394                 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2395                 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2396                         sizeof(struct ip));
2397
2398                 /* For Sec Proto only one descriptor is required. */
2399                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2400                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2401                         PDBOPTS_ESP_OIHI_PDB_INL |
2402                         PDBOPTS_ESP_IVSRC |
2403                         PDBHMO_ESP_ENCAP_DTTL |
2404                         PDBHMO_ESP_SNR;
2405                 encap_pdb.spi = ipsec_xform->spi;
2406                 encap_pdb.ip_hdr_len = sizeof(struct ip);
2407
2408                 session->dir = DIR_ENC;
2409                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2410                                 1, 0, SHR_SERIAL, &encap_pdb,
2411                                 (uint8_t *)&ip4_hdr,
2412                                 &cipherdata, &authdata);
2413         } else if (ipsec_xform->direction ==
2414                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2415                 flc->dhr = SEC_FLC_DHR_INBOUND;
2416                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2417                 decap_pdb.options = sizeof(struct ip) << 16;
2418                 session->dir = DIR_DEC;
2419                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2420                                 1, 0, SHR_SERIAL,
2421                                 &decap_pdb, &cipherdata, &authdata);
2422         } else
2423                 goto out;
2424
2425         if (bufsize < 0) {
2426                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2427                 goto out;
2428         }
2429
2430         flc->word1_sdl = (uint8_t)bufsize;
2431
2432         /* Enable the stashing control bit */
2433         DPAA2_SET_FLC_RSC(flc);
2434         flc->word2_rflc_31_0 = lower_32_bits(
2435                         (size_t)&(((struct dpaa2_sec_qp *)
2436                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2437         flc->word3_rflc_63_32 = upper_32_bits(
2438                         (size_t)&(((struct dpaa2_sec_qp *)
2439                         dev->data->queue_pairs[0])->rx_vq));
2440
2441         /* Set EWS bit i.e. enable write-safe */
2442         DPAA2_SET_FLC_EWS(flc);
2443         /* Set BS = 1 i.e reuse input buffers as output buffers */
2444         DPAA2_SET_FLC_REUSE_BS(flc);
2445         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2446         DPAA2_SET_FLC_REUSE_FF(flc);
2447
2448         session->ctxt = priv;
2449
2450         return 0;
2451 out:
2452         rte_free(session->auth_key.data);
2453         rte_free(session->cipher_key.data);
2454         rte_free(priv);
2455         return -1;
2456 }
2457
2458 static int
2459 dpaa2_sec_security_session_create(void *dev,
2460                                   struct rte_security_session_conf *conf,
2461                                   struct rte_security_session *sess,
2462                                   struct rte_mempool *mempool)
2463 {
2464         void *sess_private_data;
2465         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2466         int ret;
2467
2468         if (rte_mempool_get(mempool, &sess_private_data)) {
2469                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2470                 return -ENOMEM;
2471         }
2472
2473         switch (conf->protocol) {
2474         case RTE_SECURITY_PROTOCOL_IPSEC:
2475                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2476                                 sess_private_data);
2477                 break;
2478         case RTE_SECURITY_PROTOCOL_MACSEC:
2479                 return -ENOTSUP;
2480         default:
2481                 return -EINVAL;
2482         }
2483         if (ret != 0) {
2484                 DPAA2_SEC_ERR("Failed to configure session parameters");
2485                 /* Return session to mempool */
2486                 rte_mempool_put(mempool, sess_private_data);
2487                 return ret;
2488         }
2489
2490         set_sec_session_private_data(sess, sess_private_data);
2491
2492         return ret;
2493 }
2494
2495 /** Clear the memory of session so it doesn't leave key material behind */
2496 static int
2497 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2498                 struct rte_security_session *sess)
2499 {
2500         PMD_INIT_FUNC_TRACE();
2501         void *sess_priv = get_sec_session_private_data(sess);
2502
2503         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2504
2505         if (sess_priv) {
2506                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2507
2508                 rte_free(s->ctxt);
2509                 rte_free(s->cipher_key.data);
2510                 rte_free(s->auth_key.data);
2511                 memset(sess, 0, sizeof(dpaa2_sec_session));
2512                 set_sec_session_private_data(sess, NULL);
2513                 rte_mempool_put(sess_mp, sess_priv);
2514         }
2515         return 0;
2516 }
2517
2518 static int
2519 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2520                 struct rte_crypto_sym_xform *xform,
2521                 struct rte_cryptodev_sym_session *sess,
2522                 struct rte_mempool *mempool)
2523 {
2524         void *sess_private_data;
2525         int ret;
2526
2527         if (rte_mempool_get(mempool, &sess_private_data)) {
2528                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2529                 return -ENOMEM;
2530         }
2531
2532         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2533         if (ret != 0) {
2534                 DPAA2_SEC_ERR("Failed to configure session parameters");
2535                 /* Return session to mempool */
2536                 rte_mempool_put(mempool, sess_private_data);
2537                 return ret;
2538         }
2539
2540         set_sym_session_private_data(sess, dev->driver_id,
2541                 sess_private_data);
2542
2543         return 0;
2544 }
2545
2546 /** Clear the memory of session so it doesn't leave key material behind */
2547 static void
2548 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2549                 struct rte_cryptodev_sym_session *sess)
2550 {
2551         PMD_INIT_FUNC_TRACE();
2552         uint8_t index = dev->driver_id;
2553         void *sess_priv = get_sym_session_private_data(sess, index);
2554         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2555
2556         if (sess_priv) {
2557                 rte_free(s->ctxt);
2558                 rte_free(s->cipher_key.data);
2559                 rte_free(s->auth_key.data);
2560                 memset(sess, 0, sizeof(dpaa2_sec_session));
2561                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2562                 set_sym_session_private_data(sess, index, NULL);
2563                 rte_mempool_put(sess_mp, sess_priv);
2564         }
2565 }
2566
2567 static int
2568 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2569                         struct rte_cryptodev_config *config __rte_unused)
2570 {
2571         PMD_INIT_FUNC_TRACE();
2572
2573         return 0;
2574 }
2575
2576 static int
2577 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2578 {
2579         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2580         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2581         struct dpseci_attr attr;
2582         struct dpaa2_queue *dpaa2_q;
2583         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2584                                         dev->data->queue_pairs;
2585         struct dpseci_rx_queue_attr rx_attr;
2586         struct dpseci_tx_queue_attr tx_attr;
2587         int ret, i;
2588
2589         PMD_INIT_FUNC_TRACE();
2590
2591         memset(&attr, 0, sizeof(struct dpseci_attr));
2592
2593         ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2594         if (ret) {
2595                 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2596                               priv->hw_id);
2597                 goto get_attr_failure;
2598         }
2599         ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2600         if (ret) {
2601                 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2602                 goto get_attr_failure;
2603         }
2604         for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2605                 dpaa2_q = &qp[i]->rx_vq;
2606                 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2607                                     &rx_attr);
2608                 dpaa2_q->fqid = rx_attr.fqid;
2609                 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2610         }
2611         for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2612                 dpaa2_q = &qp[i]->tx_vq;
2613                 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2614                                     &tx_attr);
2615                 dpaa2_q->fqid = tx_attr.fqid;
2616                 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2617         }
2618
2619         return 0;
2620 get_attr_failure:
2621         dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2622         return -1;
2623 }
2624
2625 static void
2626 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2627 {
2628         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2629         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2630         int ret;
2631
2632         PMD_INIT_FUNC_TRACE();
2633
2634         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2635         if (ret) {
2636                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
2637                              priv->hw_id);
2638                 return;
2639         }
2640
2641         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2642         if (ret < 0) {
2643                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
2644                 return;
2645         }
2646 }
2647
2648 static int
2649 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2650 {
2651         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2652         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2653         int ret;
2654
2655         PMD_INIT_FUNC_TRACE();
2656
2657         /* Function is reverse of dpaa2_sec_dev_init.
2658          * It does the following:
2659          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2660          * 2. Close the DPSECI device
2661          * 3. Free the allocated resources.
2662          */
2663
2664         /*Close the device at underlying layer*/
2665         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2666         if (ret) {
2667                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
2668                 return -1;
2669         }
2670
2671         /*Free the allocated memory for ethernet private data and dpseci*/
2672         priv->hw = NULL;
2673         rte_free(dpseci);
2674
2675         return 0;
2676 }
2677
2678 static void
2679 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
2680                         struct rte_cryptodev_info *info)
2681 {
2682         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2683
2684         PMD_INIT_FUNC_TRACE();
2685         if (info != NULL) {
2686                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2687                 info->feature_flags = dev->feature_flags;
2688                 info->capabilities = dpaa2_sec_capabilities;
2689                 /* No limit of number of sessions */
2690                 info->sym.max_nb_sessions = 0;
2691                 info->driver_id = cryptodev_driver_id;
2692         }
2693 }
2694
/* Aggregate software per-queue-pair counters into @stats and log the
 * DPSECI hardware SEC counters. The hardware values are log-only: the
 * rte_cryptodev_stats structure has no fields for them.
 */
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	/* Counters are accumulated with += — NOTE(review): assumes the
	 * caller zeroed *stats beforehand; confirm against the framework's
	 * stats_get wrapper.
	 */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	/* Fetch the hardware counters via an MC command. */
	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
			    "\n\tNum of Requests Dequeued = %" PRIu64
			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
2745
2746 static
2747 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
2748 {
2749         int i;
2750         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2751                                    (dev->data->queue_pairs);
2752
2753         PMD_INIT_FUNC_TRACE();
2754
2755         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2756                 if (qp[i] == NULL) {
2757                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
2758                         continue;
2759                 }
2760                 qp[i]->tx_vq.rx_pkts = 0;
2761                 qp[i]->tx_vq.tx_pkts = 0;
2762                 qp[i]->tx_vq.err_pkts = 0;
2763                 qp[i]->rx_vq.rx_pkts = 0;
2764                 qp[i]->rx_vq.tx_pkts = 0;
2765                 qp[i]->rx_vq.err_pkts = 0;
2766         }
2767 }
2768
/* Cryptodev ops table handed to the framework in dpaa2_sec_dev_init();
 * the queue-pair and session-size handlers are implemented elsewhere in
 * this file.
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
2784
/* Return the static table of security protocol capabilities this PMD
 * advertises through the rte_security API.
 */
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}
2790
/* rte_security vtable: only session create/destroy and capability query
 * are supported; update, stats and inline metadata are intentionally NULL.
 */
struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
2799
2800 static int
2801 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
2802 {
2803         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2804
2805         rte_free(dev->security_ctx);
2806
2807         rte_mempool_free(internals->fle_pool);
2808
2809         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
2810                        dev->data->name, rte_socket_id());
2811
2812         return 0;
2813 }
2814
2815 static int
2816 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
2817 {
2818         struct dpaa2_sec_dev_private *internals;
2819         struct rte_device *dev = cryptodev->device;
2820         struct rte_dpaa2_device *dpaa2_dev;
2821         struct rte_security_ctx *security_instance;
2822         struct fsl_mc_io *dpseci;
2823         uint16_t token;
2824         struct dpseci_attr attr;
2825         int retcode, hw_id;
2826         char str[20];
2827
2828         PMD_INIT_FUNC_TRACE();
2829         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2830         if (dpaa2_dev == NULL) {
2831                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
2832                 return -1;
2833         }
2834         hw_id = dpaa2_dev->object_id;
2835
2836         cryptodev->driver_id = cryptodev_driver_id;
2837         cryptodev->dev_ops = &crypto_ops;
2838
2839         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
2840         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
2841         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2842                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2843                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2844                         RTE_CRYPTODEV_FF_SECURITY |
2845                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2846                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2847                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2848                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2849                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2850
2851         internals = cryptodev->data->dev_private;
2852
2853         /*
2854          * For secondary processes, we don't initialise any further as primary
2855          * has already done this work. Only check we don't need a different
2856          * RX function
2857          */
2858         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2859                 DPAA2_SEC_DEBUG("Device already init by primary process");
2860                 return 0;
2861         }
2862
2863         /* Initialize security_ctx only for primary process*/
2864         security_instance = rte_malloc("rte_security_instances_ops",
2865                                 sizeof(struct rte_security_ctx), 0);
2866         if (security_instance == NULL)
2867                 return -ENOMEM;
2868         security_instance->device = (void *)cryptodev;
2869         security_instance->ops = &dpaa2_sec_security_ops;
2870         security_instance->sess_cnt = 0;
2871         cryptodev->security_ctx = security_instance;
2872
2873         /*Open the rte device via MC and save the handle for further use*/
2874         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
2875                                 sizeof(struct fsl_mc_io), 0);
2876         if (!dpseci) {
2877                 DPAA2_SEC_ERR(
2878                         "Error in allocating the memory for dpsec object");
2879                 return -1;
2880         }
2881         dpseci->regs = rte_mcp_ptr_list[0];
2882
2883         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
2884         if (retcode != 0) {
2885                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
2886                               retcode);
2887                 goto init_error;
2888         }
2889         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
2890         if (retcode != 0) {
2891                 DPAA2_SEC_ERR(
2892                              "Cannot get dpsec device attributed: Error = %x",
2893                              retcode);
2894                 goto init_error;
2895         }
2896         sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
2897
2898         internals->max_nb_queue_pairs = attr.num_tx_queues;
2899         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
2900         internals->hw = dpseci;
2901         internals->token = token;
2902
2903         sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
2904         internals->fle_pool = rte_mempool_create((const char *)str,
2905                         FLE_POOL_NUM_BUFS,
2906                         FLE_POOL_BUF_SIZE,
2907                         FLE_POOL_CACHE_SIZE, 0,
2908                         NULL, NULL, NULL, NULL,
2909                         SOCKET_ID_ANY, 0);
2910         if (!internals->fle_pool) {
2911                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2912                 goto init_error;
2913         }
2914
2915         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
2916         return 0;
2917
2918 init_error:
2919         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2920
2921         /* dpaa2_sec_uninit(crypto_dev_name); */
2922         return -EFAULT;
2923 }
2924
2925 static int
2926 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
2927                           struct rte_dpaa2_device *dpaa2_dev)
2928 {
2929         struct rte_cryptodev *cryptodev;
2930         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2931
2932         int retval;
2933
2934         sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
2935
2936         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2937         if (cryptodev == NULL)
2938                 return -ENOMEM;
2939
2940         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2941                 cryptodev->data->dev_private = rte_zmalloc_socket(
2942                                         "cryptodev private structure",
2943                                         sizeof(struct dpaa2_sec_dev_private),
2944                                         RTE_CACHE_LINE_SIZE,
2945                                         rte_socket_id());
2946
2947                 if (cryptodev->data->dev_private == NULL)
2948                         rte_panic("Cannot allocate memzone for private "
2949                                   "device data");
2950         }
2951
2952         dpaa2_dev->cryptodev = cryptodev;
2953         cryptodev->device = &dpaa2_dev->device;
2954
2955         /* init user callbacks */
2956         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2957
2958         /* Invoke PMD device initialization function */
2959         retval = dpaa2_sec_dev_init(cryptodev);
2960         if (retval == 0)
2961                 return 0;
2962
2963         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2964                 rte_free(cryptodev->data->dev_private);
2965
2966         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
2967
2968         return -ENXIO;
2969 }
2970
2971 static int
2972 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
2973 {
2974         struct rte_cryptodev *cryptodev;
2975         int ret;
2976
2977         cryptodev = dpaa2_dev->cryptodev;
2978         if (cryptodev == NULL)
2979                 return -ENODEV;
2980
2981         ret = dpaa2_sec_uninit(cryptodev);
2982         if (ret)
2983                 return ret;
2984
2985         return rte_cryptodev_pmd_destroy(cryptodev);
2986 }
2987
/* fslmc bus driver descriptor: this PMD handles DPAA2_CRYPTO objects and
 * requires IOVA-as-VA addressing.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

/* Handle used to register this PMD with the cryptodev framework. */
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register on the fslmc bus and obtain a cryptodev driver id. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3003
3004 RTE_INIT(dpaa2_sec_init_log)
3005 {
3006         /* Bus level logs */
3007         dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
3008         if (dpaa2_logtype_sec >= 0)
3009                 rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
3010 }