drivers/crypto: enable ESN in NXP drivers
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_cryptodev.h>
14 #include <rte_malloc.h>
15 #include <rte_memcpy.h>
16 #include <rte_string_fns.h>
17 #include <rte_cycles.h>
18 #include <rte_kvargs.h>
19 #include <rte_dev.h>
20 #include <rte_cryptodev_pmd.h>
21 #include <rte_common.h>
22 #include <rte_fslmc.h>
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_dpio.h>
26 #include <dpaa2_hw_mempool.h>
27 #include <fsl_dpopr.h>
28 #include <fsl_dpseci.h>
29 #include <fsl_mc_sys.h>
30
31 #include "dpaa2_sec_priv.h"
32 #include "dpaa2_sec_event.h"
33 #include "dpaa2_sec_logs.h"
34
/* Required types */
typedef uint64_t	dma_addr_t;

/* RTA (Runtime Assembler) descriptor-building header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a oneword job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
/* PCI-style identifiers of the Freescale/NXP SEC accelerator */
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
/* Management Complex object device id for DPSECI objects */
#define FSL_MC_DPSECI_DEVID	3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
/* Size of each buffer drawn from the FLE pool: holds the op/ctxt store
 * FLE, the output/input FLE pair, SG entries and ICV scratch used by the
 * single-segment FD builders below.
 */
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
/* Size of the per-op heap scratch used by the scatter-gather FD builders */
#define FLE_SG_MEM_SIZE		2048
/* Data head-room adjustment values programmed into the flow context;
 * presumably outbound needs extra room for tunnel headers — see the
 * session-setup code (outside this chunk) for usage.
 */
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

/* SEC engine era used by RTA when generating shared descriptors */
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

/* Driver id assigned by the cryptodev framework at registration time */
static uint8_t cryptodev_driver_id;

/* Log type id for this PMD, registered with the rte_log subsystem */
int dpaa2_logtype_sec;
66
/* Build a compound frame descriptor (FD) for a protocol-offload operation
 * where input and output may be different mbufs.
 *
 * The FD points at a frame list of two FLEs: op_fle describes the output
 * buffer and ip_fle the input frame.  One extra FLE entry in front of that
 * pair stores the rte_crypto_op pointer and session ctxt so they can be
 * recovered at dequeue time by stepping back from the FD address.
 *
 * @param sess  session providing the ctxt/flow-context and PDCP params
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  buffer pool id of the mbufs (or >= MAX_BPID for none)
 * @return 0 on success, -1 if the FLE pool is exhausted
 */
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	/* In-place operation when no separate destination is supplied */
	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	/* Tag FD/FLEs with the buffer pool id, or mark them invalid-pool
	 * (IVP) so hardware does not try to release them to a pool.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data; the whole buffer length
	 * is exposed so the engine can grow the frame (e.g. tunnel encap).
	 */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	/* FD length = length of the input frame */
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

#ifdef ENABLE_HFN_OVERRIDE
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
	}
#endif

	return 0;

}
139
140 static inline int
141 build_proto_fd(dpaa2_sec_session *sess,
142                struct rte_crypto_op *op,
143                struct qbman_fd *fd, uint16_t bpid)
144 {
145         struct rte_crypto_sym_op *sym_op = op->sym;
146         if (sym_op->m_dst)
147                 return build_proto_compound_fd(sess, op, fd, bpid);
148
149         struct ctxt_priv *priv = sess->ctxt;
150         struct sec_flow_context *flc;
151         struct rte_mbuf *mbuf = sym_op->m_src;
152
153         if (likely(bpid < MAX_BPID))
154                 DPAA2_SET_FD_BPID(fd, bpid);
155         else
156                 DPAA2_SET_FD_IVP(fd);
157
158         /* Save the shared descriptor */
159         flc = &priv->flc_desc[0].flc;
160
161         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
162         DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
163         DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
164         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
165
166         /* save physical address of mbuf */
167         op->sym->aead.digest.phys_addr = mbuf->buf_iova;
168         mbuf->buf_iova = (size_t)op;
169
170         return 0;
171 }
172
/* Build a compound FD with scatter-gather frame-list entries for an AEAD
 * (GCM) operation on multi-segment mbufs.
 *
 * Scratch layout (heap-allocated here, freed at dequeue via the op/ctxt
 * pointers saved in fle[0]):
 *   fle + 0 : stores the rte_crypto_op pointer and session ctxt
 *   fle + 1 : output frame-list entry (op_fle, SG extension)
 *   fle + 2 : input frame-list entry (ip_fle, SG extension)
 *   fle + 3.. : SG entries — output segs [+ digest on encrypt], IV,
 *               [AAD], input segs [+ received-ICV copy on decrypt]
 *
 * @param sess  session with direction, digest/IV lengths and AAD length
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  unused: scratch is heap-allocated, not pool-backed
 * @return 0 on success, -1 if scratch allocation fails
 */
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output goes to m_dst when provided, else in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Encrypt output also carries the appended ICV */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	/* NOTE(review): output starts (16B-aligned AAD room - AAD) before
	 * data_off — presumably matching how the descriptor lays out the
	 * AAD; confirm against the shared-descriptor definition.
	 */
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the ICV room off the last output segment; on encrypt the
	 * digest gets its own dedicated SGE below.
	 */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Input = IV + AAD + payload (+ received ICV when verifying) */
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch right after this SG
		 * table so the engine can (presumably) compare it with the
		 * computed digest.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	/* FD length = total input length */
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
319
/* Build a compound FD for an AEAD (GCM) operation on single-segment mbufs.
 *
 * FLE scratch comes from the session's FLE pool (FLE_POOL_BUF_SIZE bytes):
 * fle[0] stores the op pointer/session ctxt for dequeue, fle[1] is the
 * output FLE, fle[2] the input FLE, followed by the SG entries and the
 * ICV scratch copy used on decrypt.
 *
 * @param sess  session with direction, digest/IV lengths and AAD length
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  buffer pool id of the mbufs (or >= MAX_BPID for none)
 * @return 0 on success, -1 if the FLE pool is exhausted
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output goes to m_dst when provided, else in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	/* Tag FD/FLEs/SGEs with the buffer pool id, or mark them
	 * invalid-pool (IVP) so hardware does not release them to a pool.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output also carries the appended ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	/* NOTE(review): output starts (16B-aligned AAD room - AAD) before
	 * data_off — presumably matching the descriptor's AAD layout;
	 * confirm against the shared-descriptor definition.
	 */
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		/* FD length = total input: payload + IV + AAD */
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Input = IV + AAD + payload (+ received ICV when verifying) */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		/* NOTE(review): BPID is set unconditionally here, even when
		 * bpid >= MAX_BPID and the IVP path was taken above —
		 * confirm this is intentional.
		 */
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch right after the SGEs
		 * so the engine can (presumably) compare it with the
		 * computed digest.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		/* FD length = total input incl. the ICV to be checked */
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
467
/* Build a compound SG FD for a chained cipher+auth (non-AEAD) operation on
 * multi-segment mbufs.
 *
 * auth_only_len is the region that is authenticated but not ciphered
 * (auth length minus cipher length).  Scratch layout matches the GCM SG
 * builder: fle[0] op/ctxt store, fle[1] output FLE, fle[2] input FLE,
 * fle[3..] SG entries.
 *
 * @param sess  session with direction, digest/IV lengths
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  unused: scratch is heap-allocated, not pool-backed
 * @return 0 on success, -1 on allocation failure
 */
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	/* Bytes that are authenticated but not ciphered */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output goes to m_dst when provided, else in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Output = ciphertext (+ appended ICV on encrypt) */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the ICV room off the last output segment; on encrypt the
	 * digest gets its own dedicated SGE below.
	 */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Input = IV + authenticated region (+ received ICV on decrypt) */
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* NOTE(review): assumes the ICV occupies the tail of the last input
	 * segment — excluded here; on decrypt a scratch copy is appended
	 * below instead. Confirm with callers.
	 */
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	/* FD length = total input length */
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
612
/* Build a compound FD for a chained cipher+auth (non-AEAD) operation on
 * single-segment mbufs.
 *
 * FLE scratch comes from the session's FLE pool: fle[0] stores the op
 * pointer/session ctxt for dequeue, fle[1]/fle[2] are the output/input
 * FLEs, followed by the SG entries and the ICV scratch copy on decrypt.
 *
 * @param sess  session with direction, digest/IV lengths
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  buffer pool id of the mbufs (or >= MAX_BPID for none)
 * @return 0 on success, -1 if the FLE pool is exhausted
 */
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	/* Bytes that are authenticated but not ciphered */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Output goes to m_dst when provided, else in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	/* Tag FD/FLEs/SGEs with the buffer pool id, or mark them
	 * invalid-pool (IVP) so hardware does not release them to a pool.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Output = ciphertext (+ appended ICV on encrypt) */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		/* FD length = total input: authenticated region + IV */
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Input = IV + authenticated region (+ received ICV on decrypt) */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		/* Copy the received ICV into scratch right after the SGEs
		 * so the engine can (presumably) compare it with the
		 * computed digest.
		 */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		/* FD length = total input incl. the ICV to be checked */
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
754
/* Build a compound SG FD for an auth-only operation on multi-segment
 * mbufs.
 *
 * The output FLE points at the digest buffer (the only output); the input
 * FLE is an SG list over the region to authenticate plus, for
 * verification, a scratch copy of the received digest.
 *
 * @param sess  session with direction and digest length
 * @param op    crypto op being enqueued
 * @param fd    frame descriptor to fill for hardware
 * @param bpid  unused: scratch is heap-allocated, not pool-backed
 * @return 0 on success, -1 on allocation failure
 */
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Auth-only ops use the INITFINAL shared-descriptor slot */
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle: the digest buffer is the only output */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		/* NOTE(review): assumes digest room occupies the tail of the
		 * last segment — confirm with callers.
		 */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case: append a scratch copy of the
		 * received digest for the engine to (presumably) compare
		 * against the computed one.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* FD length = total input length */
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
834
/*
 * Build a compound frame descriptor for an auth-only (hash/HMAC) operation
 * on a contiguous (single-segment) source mbuf. FLEs come from the per-device
 * fle_pool; the first entry again stashes the op pointer and session ctxt
 * for recovery at dequeue time.
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	/* Tag the FD/FLEs with the buffer pool id, or mark them invalid
	 * (IVP) when the mbuf pool has no hardware bpid.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Output FLE: digest destination */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Digest calculation: input is just the auth region */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification: build a 2-entry SG input — the auth
		 * region followed by a scratch copy of the received digest.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		/* Scratch digest copy lives just past the SG entries */
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
924
/*
 * Build a compound frame descriptor for a cipher-only operation on a
 * segmented (scatter-gather) mbuf chain.
 *
 * FLE layout in the rte_malloc'd scratch area:
 *   fle[0]   - bookkeeping: crypto op pointer + session ctxt for dequeue
 *   fle[1]   - output FLE (SG ext) -> output segment table
 *   fle[2]   - input FLE (SG ext) -> IV entry followed by input segments
 *   fle[3..] - the SG tables themselves (output first, then input)
 *
 * Output goes to m_dst when set (out-of-place), otherwise in-place to m_src.
 * Returns 0 on success, -1 if the FLE scratch allocation fails.
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg: skip cipher.data.offset bytes in the first segment */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: input SG table starts right after the output table */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV: the IV is prepended to the input stream */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1043
/*
 * Build a compound frame descriptor for a cipher-only operation on a
 * contiguous (single-segment) mbuf. FLEs come from the per-device fle_pool;
 * the first entry stashes the op pointer and session ctxt for dequeue.
 * Output: one plain FLE (to m_dst, or in-place to m_src when m_dst is NULL).
 * Input: a 2-entry SG (IV followed by the cipher region of m_src).
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag the FD/FLEs with the buffer pool id, or mark them invalid
	 * (IVP) when the mbuf pool has no hardware bpid.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: ciphertext/plaintext destination */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: SG extension -> IV entry + data entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1151
1152 static inline int
1153 build_sec_fd(struct rte_crypto_op *op,
1154              struct qbman_fd *fd, uint16_t bpid)
1155 {
1156         int ret = -1;
1157         dpaa2_sec_session *sess;
1158
1159         PMD_INIT_FUNC_TRACE();
1160
1161         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1162                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1163                                 op->sym->session, cryptodev_driver_id);
1164         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1165                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1166                                 op->sym->sec_session);
1167         else
1168                 return -1;
1169
1170         /* Segmented buffer */
1171         if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1172                 switch (sess->ctxt_type) {
1173                 case DPAA2_SEC_CIPHER:
1174                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1175                         break;
1176                 case DPAA2_SEC_AUTH:
1177                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1178                         break;
1179                 case DPAA2_SEC_AEAD:
1180                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1181                         break;
1182                 case DPAA2_SEC_CIPHER_HASH:
1183                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1184                         break;
1185                 case DPAA2_SEC_HASH_CIPHER:
1186                 default:
1187                         DPAA2_SEC_ERR("error: Unsupported session");
1188                 }
1189         } else {
1190                 switch (sess->ctxt_type) {
1191                 case DPAA2_SEC_CIPHER:
1192                         ret = build_cipher_fd(sess, op, fd, bpid);
1193                         break;
1194                 case DPAA2_SEC_AUTH:
1195                         ret = build_auth_fd(sess, op, fd, bpid);
1196                         break;
1197                 case DPAA2_SEC_AEAD:
1198                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1199                         break;
1200                 case DPAA2_SEC_CIPHER_HASH:
1201                         ret = build_authenc_fd(sess, op, fd, bpid);
1202                         break;
1203                 case DPAA2_SEC_IPSEC:
1204                         ret = build_proto_fd(sess, op, fd, bpid);
1205                         break;
1206                 case DPAA2_SEC_PDCP:
1207                         ret = build_proto_compound_fd(sess, op, fd, bpid);
1208                         break;
1209                 case DPAA2_SEC_HASH_CIPHER:
1210                 default:
1211                         DPAA2_SEC_ERR("error: Unsupported session");
1212                 }
1213         }
1214         return ret;
1215 }
1216
/*
 * Burst-enqueue crypto ops to the SEC engine via the per-lcore QBMAN
 * software portal. Ops are converted to frame descriptors in batches of
 * at most dpaa2_eqcr_size and pushed with qbman_swp_enqueue_multiple().
 *
 * Returns the number of ops actually enqueued; sessionless ops and
 * portal-affinity failures return 0.
 */
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/*todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Only ops carrying a session type are checked on the first op;
	 * presumably the whole burst shares the same session type.
	 */
	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* A non-zero mbuf seqn means this op was delivered
			 * via an ordered/atomic event queue: release the
			 * held DQRR entry using enqueue-DCA.
			 */
			if ((*ops)->sym->m_src->seqn) {
			 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

			 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
			 DPAA2_PER_LCORE_DQRR_SIZE--;
			 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			/* NOTE(review): on a build failure mid-batch we jump
			 * to skip_tx without enqueuing the FDs already built
			 * for this batch — their FLE memory appears to be
			 * leaked; verify intended behavior.
			 */
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		/* Retry until the portal accepts the whole batch */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							&flags[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
1299
/*
 * Recover the crypto op from a single-buffer (non-compound) FD, as produced
 * by protocol-offload (IPsec) enqueue.
 *
 * The op pointer was stashed in the mbuf's buf_iova at enqueue time and the
 * original buf_iova parked in aead.digest.phys_addr — presumably by
 * build_proto_fd(), which is outside this view; confirm against that builder.
 * The mbuf length is grown to the FD length and data_off adjusted by the
 * SEC descriptor's data head-room (negative for outbound, 0 for inbound).
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Grow pkt_len/data_len by the delta the SEC processing added */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Swap back: buf_iova held the op; restore the real IOVA */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
1328
/*
 * Recover the crypto op from a dequeued FD and release the per-op FLE
 * memory. Compound FDs carry the op pointer and session ctxt in the FLE
 * that precedes the one the FD points at (see the builders above).
 * Single-format FDs are delegated to sec_simple_fd_to_mbuf().
 *
 * Returns the op, or NULL for non-inline (IVP) buffers, which are not
 * supported yet.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	/* The builders stored the op pointer in the FLE just before the
	 * FD address (fle - 1).
	 */
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* For IPsec protocol offload the FD length is the post-processing
	 * packet length; propagate it to the mbuf.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: pool buffer for contiguous ops (the *_fd
	 * builders), rte_malloc'd area for SG ops (the *_sg_fd builders)
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1398
/*
 * Burst-dequeue completed crypto ops from the SEC rx queue using a QBMAN
 * volatile-dequeue (pull) command. Blocks (spins) until the issued pull
 * completes, converting each valid frame back to its crypto op and setting
 * op->status from the FD frame result code.
 *
 * Returns the number of ops written to 'ops'.
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	/* One pull for at most dpaa2_dqrr_size frames */
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		/* NOTE(review): sec_fd_to_mbuf() can return NULL for non
		 * inline (IVP) buffers; the status write below would then
		 * dereference NULL — verify this path cannot occur here.
		 */
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1495
1496 /** Release queue pair */
1497 static int
1498 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1499 {
1500         struct dpaa2_sec_qp *qp =
1501                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1502
1503         PMD_INIT_FUNC_TRACE();
1504
1505         if (qp->rx_vq.q_storage) {
1506                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1507                 rte_free(qp->rx_vq.q_storage);
1508         }
1509         rte_free(qp);
1510
1511         dev->data->queue_pairs[queue_pair_id] = NULL;
1512
1513         return 0;
1514 }
1515
1516 /** Setup a queue pair */
1517 static int
1518 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1519                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1520                 __rte_unused int socket_id)
1521 {
1522         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1523         struct dpaa2_sec_qp *qp;
1524         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1525         struct dpseci_rx_queue_cfg cfg;
1526         int32_t retcode;
1527
1528         PMD_INIT_FUNC_TRACE();
1529
1530         /* If qp is already in use free ring memory and qp metadata. */
1531         if (dev->data->queue_pairs[qp_id] != NULL) {
1532                 DPAA2_SEC_INFO("QP already setup");
1533                 return 0;
1534         }
1535
1536         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1537                     dev, qp_id, qp_conf);
1538
1539         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1540
1541         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1542                         RTE_CACHE_LINE_SIZE);
1543         if (!qp) {
1544                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1545                 return -1;
1546         }
1547
1548         qp->rx_vq.crypto_data = dev->data;
1549         qp->tx_vq.crypto_data = dev->data;
1550         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1551                 sizeof(struct queue_storage_info_t),
1552                 RTE_CACHE_LINE_SIZE);
1553         if (!qp->rx_vq.q_storage) {
1554                 DPAA2_SEC_ERR("malloc failed for q_storage");
1555                 return -1;
1556         }
1557         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1558
1559         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1560                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1561                 return -1;
1562         }
1563
1564         dev->data->queue_pairs[qp_id] = qp;
1565
1566         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1567         cfg.user_ctx = (size_t)(&qp->rx_vq);
1568         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1569                                       qp_id, &cfg);
1570         return retcode;
1571 }
1572
1573 /** Return the number of allocated queue pairs */
1574 static uint32_t
1575 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1576 {
1577         PMD_INIT_FUNC_TRACE();
1578
1579         return dev->data->nb_queue_pairs;
1580 }
1581
/** Return the size of the DPAA2 SEC session structure.
 * (Comment previously referred to "aesni gcm" — a copy-paste leftover
 * from another PMD; this driver returns sizeof(dpaa2_sec_session).)
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
1590
1591 static int
1592 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1593                       struct rte_crypto_sym_xform *xform,
1594                       dpaa2_sec_session *session)
1595 {
1596         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1597         struct alginfo cipherdata;
1598         int bufsize, i;
1599         struct ctxt_priv *priv;
1600         struct sec_flow_context *flc;
1601
1602         PMD_INIT_FUNC_TRACE();
1603
1604         /* For SEC CIPHER only one descriptor is required. */
1605         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1606                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1607                         RTE_CACHE_LINE_SIZE);
1608         if (priv == NULL) {
1609                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1610                 return -1;
1611         }
1612
1613         priv->fle_pool = dev_priv->fle_pool;
1614
1615         flc = &priv->flc_desc[0].flc;
1616
1617         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1618                         RTE_CACHE_LINE_SIZE);
1619         if (session->cipher_key.data == NULL) {
1620                 DPAA2_SEC_ERR("No Memory for cipher key");
1621                 rte_free(priv);
1622                 return -1;
1623         }
1624         session->cipher_key.length = xform->cipher.key.length;
1625
1626         memcpy(session->cipher_key.data, xform->cipher.key.data,
1627                xform->cipher.key.length);
1628         cipherdata.key = (size_t)session->cipher_key.data;
1629         cipherdata.keylen = session->cipher_key.length;
1630         cipherdata.key_enc_flags = 0;
1631         cipherdata.key_type = RTA_DATA_IMM;
1632
1633         /* Set IV parameters */
1634         session->iv.offset = xform->cipher.iv.offset;
1635         session->iv.length = xform->cipher.iv.length;
1636
1637         switch (xform->cipher.algo) {
1638         case RTE_CRYPTO_CIPHER_AES_CBC:
1639                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1640                 cipherdata.algmode = OP_ALG_AAI_CBC;
1641                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1642                 break;
1643         case RTE_CRYPTO_CIPHER_3DES_CBC:
1644                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1645                 cipherdata.algmode = OP_ALG_AAI_CBC;
1646                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1647                 break;
1648         case RTE_CRYPTO_CIPHER_AES_CTR:
1649                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1650                 cipherdata.algmode = OP_ALG_AAI_CTR;
1651                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1652                 break;
1653         case RTE_CRYPTO_CIPHER_3DES_CTR:
1654         case RTE_CRYPTO_CIPHER_AES_ECB:
1655         case RTE_CRYPTO_CIPHER_3DES_ECB:
1656         case RTE_CRYPTO_CIPHER_AES_XTS:
1657         case RTE_CRYPTO_CIPHER_AES_F8:
1658         case RTE_CRYPTO_CIPHER_ARC4:
1659         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1660         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1661         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1662         case RTE_CRYPTO_CIPHER_NULL:
1663                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1664                         xform->cipher.algo);
1665                 goto error_out;
1666         default:
1667                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1668                         xform->cipher.algo);
1669                 goto error_out;
1670         }
1671         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1672                                 DIR_ENC : DIR_DEC;
1673
1674         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1675                                         &cipherdata, NULL, session->iv.length,
1676                                         session->dir);
1677         if (bufsize < 0) {
1678                 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1679                 goto error_out;
1680         }
1681
1682         flc->word1_sdl = (uint8_t)bufsize;
1683         session->ctxt = priv;
1684
1685         for (i = 0; i < bufsize; i++)
1686                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1687
1688         return 0;
1689
1690 error_out:
1691         rte_free(session->cipher_key.data);
1692         rte_free(priv);
1693         return -1;
1694 }
1695
1696 static int
1697 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1698                     struct rte_crypto_sym_xform *xform,
1699                     dpaa2_sec_session *session)
1700 {
1701         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1702         struct alginfo authdata;
1703         int bufsize, i;
1704         struct ctxt_priv *priv;
1705         struct sec_flow_context *flc;
1706
1707         PMD_INIT_FUNC_TRACE();
1708
1709         /* For SEC AUTH three descriptors are required for various stages */
1710         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1711                         sizeof(struct ctxt_priv) + 3 *
1712                         sizeof(struct sec_flc_desc),
1713                         RTE_CACHE_LINE_SIZE);
1714         if (priv == NULL) {
1715                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1716                 return -1;
1717         }
1718
1719         priv->fle_pool = dev_priv->fle_pool;
1720         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1721
1722         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1723                         RTE_CACHE_LINE_SIZE);
1724         if (session->auth_key.data == NULL) {
1725                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1726                 rte_free(priv);
1727                 return -1;
1728         }
1729         session->auth_key.length = xform->auth.key.length;
1730
1731         memcpy(session->auth_key.data, xform->auth.key.data,
1732                xform->auth.key.length);
1733         authdata.key = (size_t)session->auth_key.data;
1734         authdata.keylen = session->auth_key.length;
1735         authdata.key_enc_flags = 0;
1736         authdata.key_type = RTA_DATA_IMM;
1737
1738         session->digest_length = xform->auth.digest_length;
1739
1740         switch (xform->auth.algo) {
1741         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1742                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1743                 authdata.algmode = OP_ALG_AAI_HMAC;
1744                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1745                 break;
1746         case RTE_CRYPTO_AUTH_MD5_HMAC:
1747                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1748                 authdata.algmode = OP_ALG_AAI_HMAC;
1749                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1750                 break;
1751         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1752                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1753                 authdata.algmode = OP_ALG_AAI_HMAC;
1754                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1755                 break;
1756         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1757                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1758                 authdata.algmode = OP_ALG_AAI_HMAC;
1759                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1760                 break;
1761         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1762                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1763                 authdata.algmode = OP_ALG_AAI_HMAC;
1764                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1765                 break;
1766         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1767                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1768                 authdata.algmode = OP_ALG_AAI_HMAC;
1769                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1770                 break;
1771         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1772         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1773         case RTE_CRYPTO_AUTH_NULL:
1774         case RTE_CRYPTO_AUTH_SHA1:
1775         case RTE_CRYPTO_AUTH_SHA256:
1776         case RTE_CRYPTO_AUTH_SHA512:
1777         case RTE_CRYPTO_AUTH_SHA224:
1778         case RTE_CRYPTO_AUTH_SHA384:
1779         case RTE_CRYPTO_AUTH_MD5:
1780         case RTE_CRYPTO_AUTH_AES_GMAC:
1781         case RTE_CRYPTO_AUTH_KASUMI_F9:
1782         case RTE_CRYPTO_AUTH_AES_CMAC:
1783         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1784         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1785                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1786                               xform->auth.algo);
1787                 goto error_out;
1788         default:
1789                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1790                               xform->auth.algo);
1791                 goto error_out;
1792         }
1793         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1794                                 DIR_ENC : DIR_DEC;
1795
1796         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1797                                    1, 0, SHR_NEVER, &authdata, !session->dir,
1798                                    session->digest_length);
1799         if (bufsize < 0) {
1800                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1801                 goto error_out;
1802         }
1803
1804         flc->word1_sdl = (uint8_t)bufsize;
1805         session->ctxt = priv;
1806         for (i = 0; i < bufsize; i++)
1807                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1808                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1809
1810
1811         return 0;
1812
1813 error_out:
1814         rte_free(session->auth_key.data);
1815         rte_free(priv);
1816         return -1;
1817 }
1818
1819 static int
1820 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1821                     struct rte_crypto_sym_xform *xform,
1822                     dpaa2_sec_session *session)
1823 {
1824         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1825         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1826         struct alginfo aeaddata;
1827         int bufsize, i;
1828         struct ctxt_priv *priv;
1829         struct sec_flow_context *flc;
1830         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1831         int err;
1832
1833         PMD_INIT_FUNC_TRACE();
1834
1835         /* Set IV parameters */
1836         session->iv.offset = aead_xform->iv.offset;
1837         session->iv.length = aead_xform->iv.length;
1838         session->ctxt_type = DPAA2_SEC_AEAD;
1839
1840         /* For SEC AEAD only one descriptor is required */
1841         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1842                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1843                         RTE_CACHE_LINE_SIZE);
1844         if (priv == NULL) {
1845                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1846                 return -1;
1847         }
1848
1849         priv->fle_pool = dev_priv->fle_pool;
1850         flc = &priv->flc_desc[0].flc;
1851
1852         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1853                                                RTE_CACHE_LINE_SIZE);
1854         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1855                 DPAA2_SEC_ERR("No Memory for aead key");
1856                 rte_free(priv);
1857                 return -1;
1858         }
1859         memcpy(session->aead_key.data, aead_xform->key.data,
1860                aead_xform->key.length);
1861
1862         session->digest_length = aead_xform->digest_length;
1863         session->aead_key.length = aead_xform->key.length;
1864         ctxt->auth_only_len = aead_xform->aad_length;
1865
1866         aeaddata.key = (size_t)session->aead_key.data;
1867         aeaddata.keylen = session->aead_key.length;
1868         aeaddata.key_enc_flags = 0;
1869         aeaddata.key_type = RTA_DATA_IMM;
1870
1871         switch (aead_xform->algo) {
1872         case RTE_CRYPTO_AEAD_AES_GCM:
1873                 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1874                 aeaddata.algmode = OP_ALG_AAI_GCM;
1875                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1876                 break;
1877         case RTE_CRYPTO_AEAD_AES_CCM:
1878                 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1879                               aead_xform->algo);
1880                 goto error_out;
1881         default:
1882                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1883                               aead_xform->algo);
1884                 goto error_out;
1885         }
1886         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1887                                 DIR_ENC : DIR_DEC;
1888
1889         priv->flc_desc[0].desc[0] = aeaddata.keylen;
1890         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1891                                MIN_JOB_DESC_SIZE,
1892                                (unsigned int *)priv->flc_desc[0].desc,
1893                                &priv->flc_desc[0].desc[1], 1);
1894
1895         if (err < 0) {
1896                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1897                 goto error_out;
1898         }
1899         if (priv->flc_desc[0].desc[1] & 1) {
1900                 aeaddata.key_type = RTA_DATA_IMM;
1901         } else {
1902                 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1903                 aeaddata.key_type = RTA_DATA_PTR;
1904         }
1905         priv->flc_desc[0].desc[0] = 0;
1906         priv->flc_desc[0].desc[1] = 0;
1907
1908         if (session->dir == DIR_ENC)
1909                 bufsize = cnstr_shdsc_gcm_encap(
1910                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1911                                 &aeaddata, session->iv.length,
1912                                 session->digest_length);
1913         else
1914                 bufsize = cnstr_shdsc_gcm_decap(
1915                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1916                                 &aeaddata, session->iv.length,
1917                                 session->digest_length);
1918         if (bufsize < 0) {
1919                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1920                 goto error_out;
1921         }
1922
1923         flc->word1_sdl = (uint8_t)bufsize;
1924         session->ctxt = priv;
1925         for (i = 0; i < bufsize; i++)
1926                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1927                             i, priv->flc_desc[0].desc[i]);
1928
1929         return 0;
1930
1931 error_out:
1932         rte_free(session->aead_key.data);
1933         rte_free(priv);
1934         return -1;
1935 }
1936
1937
1938 static int
1939 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1940                     struct rte_crypto_sym_xform *xform,
1941                     dpaa2_sec_session *session)
1942 {
1943         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1944         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1945         struct alginfo authdata, cipherdata;
1946         int bufsize, i;
1947         struct ctxt_priv *priv;
1948         struct sec_flow_context *flc;
1949         struct rte_crypto_cipher_xform *cipher_xform;
1950         struct rte_crypto_auth_xform *auth_xform;
1951         int err;
1952
1953         PMD_INIT_FUNC_TRACE();
1954
1955         if (session->ext_params.aead_ctxt.auth_cipher_text) {
1956                 cipher_xform = &xform->cipher;
1957                 auth_xform = &xform->next->auth;
1958                 session->ctxt_type =
1959                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1960                         DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1961         } else {
1962                 cipher_xform = &xform->next->cipher;
1963                 auth_xform = &xform->auth;
1964                 session->ctxt_type =
1965                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1966                         DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1967         }
1968
1969         /* Set IV parameters */
1970         session->iv.offset = cipher_xform->iv.offset;
1971         session->iv.length = cipher_xform->iv.length;
1972
1973         /* For SEC AEAD only one descriptor is required */
1974         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1975                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1976                         RTE_CACHE_LINE_SIZE);
1977         if (priv == NULL) {
1978                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1979                 return -1;
1980         }
1981
1982         priv->fle_pool = dev_priv->fle_pool;
1983         flc = &priv->flc_desc[0].flc;
1984
1985         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1986                                                RTE_CACHE_LINE_SIZE);
1987         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1988                 DPAA2_SEC_ERR("No Memory for cipher key");
1989                 rte_free(priv);
1990                 return -1;
1991         }
1992         session->cipher_key.length = cipher_xform->key.length;
1993         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1994                                              RTE_CACHE_LINE_SIZE);
1995         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1996                 DPAA2_SEC_ERR("No Memory for auth key");
1997                 rte_free(session->cipher_key.data);
1998                 rte_free(priv);
1999                 return -1;
2000         }
2001         session->auth_key.length = auth_xform->key.length;
2002         memcpy(session->cipher_key.data, cipher_xform->key.data,
2003                cipher_xform->key.length);
2004         memcpy(session->auth_key.data, auth_xform->key.data,
2005                auth_xform->key.length);
2006
2007         authdata.key = (size_t)session->auth_key.data;
2008         authdata.keylen = session->auth_key.length;
2009         authdata.key_enc_flags = 0;
2010         authdata.key_type = RTA_DATA_IMM;
2011
2012         session->digest_length = auth_xform->digest_length;
2013
2014         switch (auth_xform->algo) {
2015         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2016                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2017                 authdata.algmode = OP_ALG_AAI_HMAC;
2018                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2019                 break;
2020         case RTE_CRYPTO_AUTH_MD5_HMAC:
2021                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2022                 authdata.algmode = OP_ALG_AAI_HMAC;
2023                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2024                 break;
2025         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2026                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2027                 authdata.algmode = OP_ALG_AAI_HMAC;
2028                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2029                 break;
2030         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2031                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2032                 authdata.algmode = OP_ALG_AAI_HMAC;
2033                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2034                 break;
2035         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2036                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2037                 authdata.algmode = OP_ALG_AAI_HMAC;
2038                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2039                 break;
2040         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2041                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2042                 authdata.algmode = OP_ALG_AAI_HMAC;
2043                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2044                 break;
2045         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2046         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2047         case RTE_CRYPTO_AUTH_NULL:
2048         case RTE_CRYPTO_AUTH_SHA1:
2049         case RTE_CRYPTO_AUTH_SHA256:
2050         case RTE_CRYPTO_AUTH_SHA512:
2051         case RTE_CRYPTO_AUTH_SHA224:
2052         case RTE_CRYPTO_AUTH_SHA384:
2053         case RTE_CRYPTO_AUTH_MD5:
2054         case RTE_CRYPTO_AUTH_AES_GMAC:
2055         case RTE_CRYPTO_AUTH_KASUMI_F9:
2056         case RTE_CRYPTO_AUTH_AES_CMAC:
2057         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2058         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2059                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2060                               auth_xform->algo);
2061                 goto error_out;
2062         default:
2063                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2064                               auth_xform->algo);
2065                 goto error_out;
2066         }
2067         cipherdata.key = (size_t)session->cipher_key.data;
2068         cipherdata.keylen = session->cipher_key.length;
2069         cipherdata.key_enc_flags = 0;
2070         cipherdata.key_type = RTA_DATA_IMM;
2071
2072         switch (cipher_xform->algo) {
2073         case RTE_CRYPTO_CIPHER_AES_CBC:
2074                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2075                 cipherdata.algmode = OP_ALG_AAI_CBC;
2076                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2077                 break;
2078         case RTE_CRYPTO_CIPHER_3DES_CBC:
2079                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2080                 cipherdata.algmode = OP_ALG_AAI_CBC;
2081                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2082                 break;
2083         case RTE_CRYPTO_CIPHER_AES_CTR:
2084                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2085                 cipherdata.algmode = OP_ALG_AAI_CTR;
2086                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2087                 break;
2088         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2089         case RTE_CRYPTO_CIPHER_NULL:
2090         case RTE_CRYPTO_CIPHER_3DES_ECB:
2091         case RTE_CRYPTO_CIPHER_AES_ECB:
2092         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2093                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2094                               cipher_xform->algo);
2095                 goto error_out;
2096         default:
2097                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2098                               cipher_xform->algo);
2099                 goto error_out;
2100         }
2101         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2102                                 DIR_ENC : DIR_DEC;
2103
2104         priv->flc_desc[0].desc[0] = cipherdata.keylen;
2105         priv->flc_desc[0].desc[1] = authdata.keylen;
2106         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2107                                MIN_JOB_DESC_SIZE,
2108                                (unsigned int *)priv->flc_desc[0].desc,
2109                                &priv->flc_desc[0].desc[2], 2);
2110
2111         if (err < 0) {
2112                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2113                 goto error_out;
2114         }
2115         if (priv->flc_desc[0].desc[2] & 1) {
2116                 cipherdata.key_type = RTA_DATA_IMM;
2117         } else {
2118                 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2119                 cipherdata.key_type = RTA_DATA_PTR;
2120         }
2121         if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2122                 authdata.key_type = RTA_DATA_IMM;
2123         } else {
2124                 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2125                 authdata.key_type = RTA_DATA_PTR;
2126         }
2127         priv->flc_desc[0].desc[0] = 0;
2128         priv->flc_desc[0].desc[1] = 0;
2129         priv->flc_desc[0].desc[2] = 0;
2130
2131         if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2132                 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2133                                               0, SHR_SERIAL,
2134                                               &cipherdata, &authdata,
2135                                               session->iv.length,
2136                                               ctxt->auth_only_len,
2137                                               session->digest_length,
2138                                               session->dir);
2139                 if (bufsize < 0) {
2140                         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2141                         goto error_out;
2142                 }
2143         } else {
2144                 DPAA2_SEC_ERR("Hash before cipher not supported");
2145                 goto error_out;
2146         }
2147
2148         flc->word1_sdl = (uint8_t)bufsize;
2149         session->ctxt = priv;
2150         for (i = 0; i < bufsize; i++)
2151                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2152                             i, priv->flc_desc[0].desc[i]);
2153
2154         return 0;
2155
2156 error_out:
2157         rte_free(session->cipher_key.data);
2158         rte_free(session->auth_key.data);
2159         rte_free(priv);
2160         return -1;
2161 }
2162
2163 static int
2164 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2165                             struct rte_crypto_sym_xform *xform, void *sess)
2166 {
2167         dpaa2_sec_session *session = sess;
2168         int ret;
2169
2170         PMD_INIT_FUNC_TRACE();
2171
2172         if (unlikely(sess == NULL)) {
2173                 DPAA2_SEC_ERR("Invalid session struct");
2174                 return -1;
2175         }
2176
2177         memset(session, 0, sizeof(dpaa2_sec_session));
2178         /* Default IV length = 0 */
2179         session->iv.length = 0;
2180
2181         /* Cipher Only */
2182         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2183                 session->ctxt_type = DPAA2_SEC_CIPHER;
2184                 ret = dpaa2_sec_cipher_init(dev, xform, session);
2185
2186         /* Authentication Only */
2187         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2188                    xform->next == NULL) {
2189                 session->ctxt_type = DPAA2_SEC_AUTH;
2190                 ret = dpaa2_sec_auth_init(dev, xform, session);
2191
2192         /* Cipher then Authenticate */
2193         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2194                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2195                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2196                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2197
2198         /* Authenticate then Cipher */
2199         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2200                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2201                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2202                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2203
2204         /* AEAD operation for AES-GCM kind of Algorithms */
2205         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2206                    xform->next == NULL) {
2207                 ret = dpaa2_sec_aead_init(dev, xform, session);
2208
2209         } else {
2210                 DPAA2_SEC_ERR("Invalid crypto type");
2211                 return -EINVAL;
2212         }
2213
2214         return ret;
2215 }
2216
2217 static int
2218 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2219                         dpaa2_sec_session *session,
2220                         struct alginfo *aeaddata)
2221 {
2222         PMD_INIT_FUNC_TRACE();
2223
2224         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2225                                                RTE_CACHE_LINE_SIZE);
2226         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2227                 DPAA2_SEC_ERR("No Memory for aead key");
2228                 return -1;
2229         }
2230         memcpy(session->aead_key.data, aead_xform->key.data,
2231                aead_xform->key.length);
2232
2233         session->digest_length = aead_xform->digest_length;
2234         session->aead_key.length = aead_xform->key.length;
2235
2236         aeaddata->key = (size_t)session->aead_key.data;
2237         aeaddata->keylen = session->aead_key.length;
2238         aeaddata->key_enc_flags = 0;
2239         aeaddata->key_type = RTA_DATA_IMM;
2240
2241         switch (aead_xform->algo) {
2242         case RTE_CRYPTO_AEAD_AES_GCM:
2243                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2244                 aeaddata->algmode = OP_ALG_AAI_GCM;
2245                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2246                 break;
2247         case RTE_CRYPTO_AEAD_AES_CCM:
2248                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2249                 aeaddata->algmode = OP_ALG_AAI_CCM;
2250                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2251                 break;
2252         default:
2253                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2254                               aead_xform->algo);
2255                 return -1;
2256         }
2257         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2258                                 DIR_ENC : DIR_DEC;
2259
2260         return 0;
2261 }
2262
2263 static int
2264 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2265         struct rte_crypto_auth_xform *auth_xform,
2266         dpaa2_sec_session *session,
2267         struct alginfo *cipherdata,
2268         struct alginfo *authdata)
2269 {
2270         if (cipher_xform) {
2271                 session->cipher_key.data = rte_zmalloc(NULL,
2272                                                        cipher_xform->key.length,
2273                                                        RTE_CACHE_LINE_SIZE);
2274                 if (session->cipher_key.data == NULL &&
2275                                 cipher_xform->key.length > 0) {
2276                         DPAA2_SEC_ERR("No Memory for cipher key");
2277                         return -ENOMEM;
2278                 }
2279
2280                 session->cipher_key.length = cipher_xform->key.length;
2281                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2282                                 cipher_xform->key.length);
2283                 session->cipher_alg = cipher_xform->algo;
2284         } else {
2285                 session->cipher_key.data = NULL;
2286                 session->cipher_key.length = 0;
2287                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2288         }
2289
2290         if (auth_xform) {
2291                 session->auth_key.data = rte_zmalloc(NULL,
2292                                                 auth_xform->key.length,
2293                                                 RTE_CACHE_LINE_SIZE);
2294                 if (session->auth_key.data == NULL &&
2295                                 auth_xform->key.length > 0) {
2296                         DPAA2_SEC_ERR("No Memory for auth key");
2297                         return -ENOMEM;
2298                 }
2299                 session->auth_key.length = auth_xform->key.length;
2300                 memcpy(session->auth_key.data, auth_xform->key.data,
2301                                 auth_xform->key.length);
2302                 session->auth_alg = auth_xform->algo;
2303         } else {
2304                 session->auth_key.data = NULL;
2305                 session->auth_key.length = 0;
2306                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2307         }
2308
2309         authdata->key = (size_t)session->auth_key.data;
2310         authdata->keylen = session->auth_key.length;
2311         authdata->key_enc_flags = 0;
2312         authdata->key_type = RTA_DATA_IMM;
2313         switch (session->auth_alg) {
2314         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2315                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2316                 authdata->algmode = OP_ALG_AAI_HMAC;
2317                 break;
2318         case RTE_CRYPTO_AUTH_MD5_HMAC:
2319                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2320                 authdata->algmode = OP_ALG_AAI_HMAC;
2321                 break;
2322         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2323                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2324                 authdata->algmode = OP_ALG_AAI_HMAC;
2325                 break;
2326         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2327                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2328                 authdata->algmode = OP_ALG_AAI_HMAC;
2329                 break;
2330         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2331                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2332                 authdata->algmode = OP_ALG_AAI_HMAC;
2333                 break;
2334         case RTE_CRYPTO_AUTH_AES_CMAC:
2335                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2336                 break;
2337         case RTE_CRYPTO_AUTH_NULL:
2338                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2339                 break;
2340         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2341         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2342         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2343         case RTE_CRYPTO_AUTH_SHA1:
2344         case RTE_CRYPTO_AUTH_SHA256:
2345         case RTE_CRYPTO_AUTH_SHA512:
2346         case RTE_CRYPTO_AUTH_SHA224:
2347         case RTE_CRYPTO_AUTH_SHA384:
2348         case RTE_CRYPTO_AUTH_MD5:
2349         case RTE_CRYPTO_AUTH_AES_GMAC:
2350         case RTE_CRYPTO_AUTH_KASUMI_F9:
2351         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2352         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2353                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2354                               session->auth_alg);
2355                 return -1;
2356         default:
2357                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2358                               session->auth_alg);
2359                 return -1;
2360         }
2361         cipherdata->key = (size_t)session->cipher_key.data;
2362         cipherdata->keylen = session->cipher_key.length;
2363         cipherdata->key_enc_flags = 0;
2364         cipherdata->key_type = RTA_DATA_IMM;
2365
2366         switch (session->cipher_alg) {
2367         case RTE_CRYPTO_CIPHER_AES_CBC:
2368                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2369                 cipherdata->algmode = OP_ALG_AAI_CBC;
2370                 break;
2371         case RTE_CRYPTO_CIPHER_3DES_CBC:
2372                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2373                 cipherdata->algmode = OP_ALG_AAI_CBC;
2374                 break;
2375         case RTE_CRYPTO_CIPHER_AES_CTR:
2376                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2377                 cipherdata->algmode = OP_ALG_AAI_CTR;
2378                 break;
2379         case RTE_CRYPTO_CIPHER_NULL:
2380                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2381                 break;
2382         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2383         case RTE_CRYPTO_CIPHER_3DES_ECB:
2384         case RTE_CRYPTO_CIPHER_AES_ECB:
2385         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2386                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2387                               session->cipher_alg);
2388                 return -1;
2389         default:
2390                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2391                               session->cipher_alg);
2392                 return -1;
2393         }
2394
2395         return 0;
2396 }
2397
#ifdef RTE_LIBRTE_SECURITY_TEST
/* Fixed 16-byte AES-CBC IV used only by the built-in security
 * self-test path (compiled in when RTE_LIBRTE_SECURITY_TEST is set).
 */
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
2403
2404 static int
2405 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2406                             struct rte_security_session_conf *conf,
2407                             void *sess)
2408 {
2409         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2410         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2411         struct rte_crypto_auth_xform *auth_xform = NULL;
2412         struct rte_crypto_aead_xform *aead_xform = NULL;
2413         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2414         struct ctxt_priv *priv;
2415         struct ipsec_encap_pdb encap_pdb;
2416         struct ipsec_decap_pdb decap_pdb;
2417         struct alginfo authdata, cipherdata;
2418         int bufsize;
2419         struct sec_flow_context *flc;
2420         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2421         int ret = -1;
2422
2423         PMD_INIT_FUNC_TRACE();
2424
2425         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2426                                 sizeof(struct ctxt_priv) +
2427                                 sizeof(struct sec_flc_desc),
2428                                 RTE_CACHE_LINE_SIZE);
2429
2430         if (priv == NULL) {
2431                 DPAA2_SEC_ERR("No memory for priv CTXT");
2432                 return -ENOMEM;
2433         }
2434
2435         priv->fle_pool = dev_priv->fle_pool;
2436         flc = &priv->flc_desc[0].flc;
2437
2438         memset(session, 0, sizeof(dpaa2_sec_session));
2439
2440         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2441                 cipher_xform = &conf->crypto_xform->cipher;
2442                 if (conf->crypto_xform->next)
2443                         auth_xform = &conf->crypto_xform->next->auth;
2444                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2445                                         session, &cipherdata, &authdata);
2446         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2447                 auth_xform = &conf->crypto_xform->auth;
2448                 if (conf->crypto_xform->next)
2449                         cipher_xform = &conf->crypto_xform->next->cipher;
2450                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2451                                         session, &cipherdata, &authdata);
2452         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2453                 aead_xform = &conf->crypto_xform->aead;
2454                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2455                                         session, &cipherdata);
2456         } else {
2457                 DPAA2_SEC_ERR("XFORM not specified");
2458                 ret = -EINVAL;
2459                 goto out;
2460         }
2461         if (ret) {
2462                 DPAA2_SEC_ERR("Failed to process xform");
2463                 goto out;
2464         }
2465
2466         session->ctxt_type = DPAA2_SEC_IPSEC;
2467         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2468                 struct ip ip4_hdr;
2469
2470                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2471                 ip4_hdr.ip_v = IPVERSION;
2472                 ip4_hdr.ip_hl = 5;
2473                 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2474                 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2475                 ip4_hdr.ip_id = 0;
2476                 ip4_hdr.ip_off = 0;
2477                 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2478                 ip4_hdr.ip_p = IPPROTO_ESP;
2479                 ip4_hdr.ip_sum = 0;
2480                 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2481                 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2482                 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2483                         sizeof(struct ip));
2484
2485                 /* For Sec Proto only one descriptor is required. */
2486                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2487                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2488                         PDBOPTS_ESP_OIHI_PDB_INL |
2489                         PDBOPTS_ESP_IVSRC |
2490                         PDBHMO_ESP_ENCAP_DTTL |
2491                         PDBHMO_ESP_SNR;
2492                 if (ipsec_xform->options.esn)
2493                         encap_pdb.options |= PDBOPTS_ESP_ESN;
2494                 encap_pdb.spi = ipsec_xform->spi;
2495                 encap_pdb.ip_hdr_len = sizeof(struct ip);
2496
2497                 session->dir = DIR_ENC;
2498                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2499                                 1, 0, SHR_SERIAL, &encap_pdb,
2500                                 (uint8_t *)&ip4_hdr,
2501                                 &cipherdata, &authdata);
2502         } else if (ipsec_xform->direction ==
2503                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2504                 flc->dhr = SEC_FLC_DHR_INBOUND;
2505                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2506                 decap_pdb.options = sizeof(struct ip) << 16;
2507                 if (ipsec_xform->options.esn)
2508                         decap_pdb.options |= PDBOPTS_ESP_ESN;
2509                 session->dir = DIR_DEC;
2510                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2511                                 1, 0, SHR_SERIAL,
2512                                 &decap_pdb, &cipherdata, &authdata);
2513         } else
2514                 goto out;
2515
2516         if (bufsize < 0) {
2517                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2518                 goto out;
2519         }
2520
2521         flc->word1_sdl = (uint8_t)bufsize;
2522
2523         /* Enable the stashing control bit */
2524         DPAA2_SET_FLC_RSC(flc);
2525         flc->word2_rflc_31_0 = lower_32_bits(
2526                         (size_t)&(((struct dpaa2_sec_qp *)
2527                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2528         flc->word3_rflc_63_32 = upper_32_bits(
2529                         (size_t)&(((struct dpaa2_sec_qp *)
2530                         dev->data->queue_pairs[0])->rx_vq));
2531
2532         /* Set EWS bit i.e. enable write-safe */
2533         DPAA2_SET_FLC_EWS(flc);
2534         /* Set BS = 1 i.e reuse input buffers as output buffers */
2535         DPAA2_SET_FLC_REUSE_BS(flc);
2536         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2537         DPAA2_SET_FLC_REUSE_FF(flc);
2538
2539         session->ctxt = priv;
2540
2541         return 0;
2542 out:
2543         rte_free(session->auth_key.data);
2544         rte_free(session->cipher_key.data);
2545         rte_free(priv);
2546         return ret;
2547 }
2548
2549 static int
2550 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2551                            struct rte_security_session_conf *conf,
2552                            void *sess)
2553 {
2554         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2555         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2556         struct rte_crypto_auth_xform *auth_xform = NULL;
2557         struct rte_crypto_cipher_xform *cipher_xform;
2558         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2559         struct ctxt_priv *priv;
2560         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2561         struct alginfo authdata, cipherdata;
2562         int bufsize = -1;
2563         struct sec_flow_context *flc;
2564 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2565         int swap = true;
2566 #else
2567         int swap = false;
2568 #endif
2569
2570         PMD_INIT_FUNC_TRACE();
2571
2572         memset(session, 0, sizeof(dpaa2_sec_session));
2573
2574         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2575                                 sizeof(struct ctxt_priv) +
2576                                 sizeof(struct sec_flc_desc),
2577                                 RTE_CACHE_LINE_SIZE);
2578
2579         if (priv == NULL) {
2580                 DPAA2_SEC_ERR("No memory for priv CTXT");
2581                 return -ENOMEM;
2582         }
2583
2584         priv->fle_pool = dev_priv->fle_pool;
2585         flc = &priv->flc_desc[0].flc;
2586
2587         /* find xfrm types */
2588         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2589                 cipher_xform = &xform->cipher;
2590         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2591                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2592                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2593                 cipher_xform = &xform->cipher;
2594                 auth_xform = &xform->next->auth;
2595         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2596                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2597                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2598                 cipher_xform = &xform->next->cipher;
2599                 auth_xform = &xform->auth;
2600         } else {
2601                 DPAA2_SEC_ERR("Invalid crypto type");
2602                 return -EINVAL;
2603         }
2604
2605         session->ctxt_type = DPAA2_SEC_PDCP;
2606         if (cipher_xform) {
2607                 session->cipher_key.data = rte_zmalloc(NULL,
2608                                                cipher_xform->key.length,
2609                                                RTE_CACHE_LINE_SIZE);
2610                 if (session->cipher_key.data == NULL &&
2611                                 cipher_xform->key.length > 0) {
2612                         DPAA2_SEC_ERR("No Memory for cipher key");
2613                         rte_free(priv);
2614                         return -ENOMEM;
2615                 }
2616                 session->cipher_key.length = cipher_xform->key.length;
2617                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2618                         cipher_xform->key.length);
2619                 session->dir =
2620                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2621                                         DIR_ENC : DIR_DEC;
2622                 session->cipher_alg = cipher_xform->algo;
2623         } else {
2624                 session->cipher_key.data = NULL;
2625                 session->cipher_key.length = 0;
2626                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2627                 session->dir = DIR_ENC;
2628         }
2629
2630         session->pdcp.domain = pdcp_xform->domain;
2631         session->pdcp.bearer = pdcp_xform->bearer;
2632         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2633         session->pdcp.sn_size = pdcp_xform->sn_size;
2634 #ifdef ENABLE_HFN_OVERRIDE
2635         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2636 #endif
2637         session->pdcp.hfn = pdcp_xform->hfn;
2638         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2639
2640         cipherdata.key = (size_t)session->cipher_key.data;
2641         cipherdata.keylen = session->cipher_key.length;
2642         cipherdata.key_enc_flags = 0;
2643         cipherdata.key_type = RTA_DATA_IMM;
2644
2645         switch (session->cipher_alg) {
2646         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2647                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2648                 break;
2649         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2650                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2651                 break;
2652         case RTE_CRYPTO_CIPHER_AES_CTR:
2653                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2654                 break;
2655         case RTE_CRYPTO_CIPHER_NULL:
2656                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2657                 break;
2658         default:
2659                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2660                               session->cipher_alg);
2661                 goto out;
2662         }
2663
2664         /* Auth is only applicable for control mode operation. */
2665         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2666                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
2667                         DPAA2_SEC_ERR(
2668                                 "PDCP Seq Num size should be 5 bits for cmode");
2669                         goto out;
2670                 }
2671                 if (auth_xform) {
2672                         session->auth_key.data = rte_zmalloc(NULL,
2673                                                         auth_xform->key.length,
2674                                                         RTE_CACHE_LINE_SIZE);
2675                         if (session->auth_key.data == NULL &&
2676                                         auth_xform->key.length > 0) {
2677                                 DPAA2_SEC_ERR("No Memory for auth key");
2678                                 rte_free(session->cipher_key.data);
2679                                 rte_free(priv);
2680                                 return -ENOMEM;
2681                         }
2682                         session->auth_key.length = auth_xform->key.length;
2683                         memcpy(session->auth_key.data, auth_xform->key.data,
2684                                         auth_xform->key.length);
2685                         session->auth_alg = auth_xform->algo;
2686                 } else {
2687                         session->auth_key.data = NULL;
2688                         session->auth_key.length = 0;
2689                         session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2690                 }
2691                 authdata.key = (size_t)session->auth_key.data;
2692                 authdata.keylen = session->auth_key.length;
2693                 authdata.key_enc_flags = 0;
2694                 authdata.key_type = RTA_DATA_IMM;
2695
2696                 switch (session->auth_alg) {
2697                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2698                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2699                         break;
2700                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2701                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
2702                         break;
2703                 case RTE_CRYPTO_AUTH_AES_CMAC:
2704                         authdata.algtype = PDCP_AUTH_TYPE_AES;
2705                         break;
2706                 case RTE_CRYPTO_AUTH_NULL:
2707                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
2708                         break;
2709                 default:
2710                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2711                                       session->auth_alg);
2712                         goto out;
2713                 }
2714
2715                 if (session->dir == DIR_ENC)
2716                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2717                                         priv->flc_desc[0].desc, 1, swap,
2718                                         pdcp_xform->hfn,
2719                                         pdcp_xform->bearer,
2720                                         pdcp_xform->pkt_dir,
2721                                         pdcp_xform->hfn_threshold,
2722                                         &cipherdata, &authdata,
2723                                         0);
2724                 else if (session->dir == DIR_DEC)
2725                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2726                                         priv->flc_desc[0].desc, 1, swap,
2727                                         pdcp_xform->hfn,
2728                                         pdcp_xform->bearer,
2729                                         pdcp_xform->pkt_dir,
2730                                         pdcp_xform->hfn_threshold,
2731                                         &cipherdata, &authdata,
2732                                         0);
2733         } else {
2734                 if (session->dir == DIR_ENC)
2735                         bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2736                                         priv->flc_desc[0].desc, 1, swap,
2737                                         (enum pdcp_sn_size)pdcp_xform->sn_size,
2738                                         pdcp_xform->hfn,
2739                                         pdcp_xform->bearer,
2740                                         pdcp_xform->pkt_dir,
2741                                         pdcp_xform->hfn_threshold,
2742                                         &cipherdata, 0);
2743                 else if (session->dir == DIR_DEC)
2744                         bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2745                                         priv->flc_desc[0].desc, 1, swap,
2746                                         (enum pdcp_sn_size)pdcp_xform->sn_size,
2747                                         pdcp_xform->hfn,
2748                                         pdcp_xform->bearer,
2749                                         pdcp_xform->pkt_dir,
2750                                         pdcp_xform->hfn_threshold,
2751                                         &cipherdata, 0);
2752         }
2753
2754         if (bufsize < 0) {
2755                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2756                 goto out;
2757         }
2758
2759         /* Enable the stashing control bit */
2760         DPAA2_SET_FLC_RSC(flc);
2761         flc->word2_rflc_31_0 = lower_32_bits(
2762                         (size_t)&(((struct dpaa2_sec_qp *)
2763                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2764         flc->word3_rflc_63_32 = upper_32_bits(
2765                         (size_t)&(((struct dpaa2_sec_qp *)
2766                         dev->data->queue_pairs[0])->rx_vq));
2767
2768         flc->word1_sdl = (uint8_t)bufsize;
2769
2770         /* Set EWS bit i.e. enable write-safe */
2771         DPAA2_SET_FLC_EWS(flc);
2772         /* Set BS = 1 i.e reuse input buffers as output buffers */
2773         DPAA2_SET_FLC_REUSE_BS(flc);
2774         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2775         DPAA2_SET_FLC_REUSE_FF(flc);
2776
2777         session->ctxt = priv;
2778
2779         return 0;
2780 out:
2781         rte_free(session->auth_key.data);
2782         rte_free(session->cipher_key.data);
2783         rte_free(priv);
2784         return -1;
2785 }
2786
2787 static int
2788 dpaa2_sec_security_session_create(void *dev,
2789                                   struct rte_security_session_conf *conf,
2790                                   struct rte_security_session *sess,
2791                                   struct rte_mempool *mempool)
2792 {
2793         void *sess_private_data;
2794         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2795         int ret;
2796
2797         if (rte_mempool_get(mempool, &sess_private_data)) {
2798                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2799                 return -ENOMEM;
2800         }
2801
2802         switch (conf->protocol) {
2803         case RTE_SECURITY_PROTOCOL_IPSEC:
2804                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2805                                 sess_private_data);
2806                 break;
2807         case RTE_SECURITY_PROTOCOL_MACSEC:
2808                 return -ENOTSUP;
2809         case RTE_SECURITY_PROTOCOL_PDCP:
2810                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
2811                                 sess_private_data);
2812                 break;
2813         default:
2814                 return -EINVAL;
2815         }
2816         if (ret != 0) {
2817                 DPAA2_SEC_ERR("Failed to configure session parameters");
2818                 /* Return session to mempool */
2819                 rte_mempool_put(mempool, sess_private_data);
2820                 return ret;
2821         }
2822
2823         set_sec_session_private_data(sess, sess_private_data);
2824
2825         return ret;
2826 }
2827
2828 /** Clear the memory of session so it doesn't leave key material behind */
2829 static int
2830 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2831                 struct rte_security_session *sess)
2832 {
2833         PMD_INIT_FUNC_TRACE();
2834         void *sess_priv = get_sec_session_private_data(sess);
2835
2836         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2837
2838         if (sess_priv) {
2839                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2840
2841                 rte_free(s->ctxt);
2842                 rte_free(s->cipher_key.data);
2843                 rte_free(s->auth_key.data);
2844                 memset(s, 0, sizeof(dpaa2_sec_session));
2845                 set_sec_session_private_data(sess, NULL);
2846                 rte_mempool_put(sess_mp, sess_priv);
2847         }
2848         return 0;
2849 }
2850
2851 static int
2852 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2853                 struct rte_crypto_sym_xform *xform,
2854                 struct rte_cryptodev_sym_session *sess,
2855                 struct rte_mempool *mempool)
2856 {
2857         void *sess_private_data;
2858         int ret;
2859
2860         if (rte_mempool_get(mempool, &sess_private_data)) {
2861                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2862                 return -ENOMEM;
2863         }
2864
2865         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2866         if (ret != 0) {
2867                 DPAA2_SEC_ERR("Failed to configure session parameters");
2868                 /* Return session to mempool */
2869                 rte_mempool_put(mempool, sess_private_data);
2870                 return ret;
2871         }
2872
2873         set_sym_session_private_data(sess, dev->driver_id,
2874                 sess_private_data);
2875
2876         return 0;
2877 }
2878
2879 /** Clear the memory of session so it doesn't leave key material behind */
2880 static void
2881 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2882                 struct rte_cryptodev_sym_session *sess)
2883 {
2884         PMD_INIT_FUNC_TRACE();
2885         uint8_t index = dev->driver_id;
2886         void *sess_priv = get_sym_session_private_data(sess, index);
2887         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2888
2889         if (sess_priv) {
2890                 rte_free(s->ctxt);
2891                 rte_free(s->cipher_key.data);
2892                 rte_free(s->auth_key.data);
2893                 memset(s, 0, sizeof(dpaa2_sec_session));
2894                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2895                 set_sym_session_private_data(sess, index, NULL);
2896                 rte_mempool_put(sess_mp, sess_priv);
2897         }
2898 }
2899
/* Device configure callback: DPAA2 SEC needs no device-level
 * configuration, so any config is accepted and 0 is returned.
 */
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
2908
2909 static int
2910 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2911 {
2912         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2913         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2914         struct dpseci_attr attr;
2915         struct dpaa2_queue *dpaa2_q;
2916         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2917                                         dev->data->queue_pairs;
2918         struct dpseci_rx_queue_attr rx_attr;
2919         struct dpseci_tx_queue_attr tx_attr;
2920         int ret, i;
2921
2922         PMD_INIT_FUNC_TRACE();
2923
2924         memset(&attr, 0, sizeof(struct dpseci_attr));
2925
2926         ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2927         if (ret) {
2928                 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2929                               priv->hw_id);
2930                 goto get_attr_failure;
2931         }
2932         ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2933         if (ret) {
2934                 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2935                 goto get_attr_failure;
2936         }
2937         for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2938                 dpaa2_q = &qp[i]->rx_vq;
2939                 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2940                                     &rx_attr);
2941                 dpaa2_q->fqid = rx_attr.fqid;
2942                 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2943         }
2944         for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2945                 dpaa2_q = &qp[i]->tx_vq;
2946                 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2947                                     &tx_attr);
2948                 dpaa2_q->fqid = tx_attr.fqid;
2949                 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2950         }
2951
2952         return 0;
2953 get_attr_failure:
2954         dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2955         return -1;
2956 }
2957
2958 static void
2959 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2960 {
2961         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2962         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2963         int ret;
2964
2965         PMD_INIT_FUNC_TRACE();
2966
2967         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2968         if (ret) {
2969                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
2970                              priv->hw_id);
2971                 return;
2972         }
2973
2974         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2975         if (ret < 0) {
2976                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
2977                 return;
2978         }
2979 }
2980
2981 static int
2982 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2983 {
2984         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2985         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2986         int ret;
2987
2988         PMD_INIT_FUNC_TRACE();
2989
2990         /* Function is reverse of dpaa2_sec_dev_init.
2991          * It does the following:
2992          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2993          * 2. Close the DPSECI device
2994          * 3. Free the allocated resources.
2995          */
2996
2997         /*Close the device at underlying layer*/
2998         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2999         if (ret) {
3000                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3001                 return -1;
3002         }
3003
3004         /*Free the allocated memory for ethernet private data and dpseci*/
3005         priv->hw = NULL;
3006         rte_free(dpseci);
3007
3008         return 0;
3009 }
3010
3011 static void
3012 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3013                         struct rte_cryptodev_info *info)
3014 {
3015         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3016
3017         PMD_INIT_FUNC_TRACE();
3018         if (info != NULL) {
3019                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3020                 info->feature_flags = dev->feature_flags;
3021                 info->capabilities = dpaa2_sec_capabilities;
3022                 /* No limit of number of sessions */
3023                 info->sym.max_nb_sessions = 0;
3024                 info->driver_id = cryptodev_driver_id;
3025         }
3026 }
3027
3028 static
3029 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3030                          struct rte_cryptodev_stats *stats)
3031 {
3032         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3033         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3034         struct dpseci_sec_counters counters = {0};
3035         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3036                                         dev->data->queue_pairs;
3037         int ret, i;
3038
3039         PMD_INIT_FUNC_TRACE();
3040         if (stats == NULL) {
3041                 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3042                 return;
3043         }
3044         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3045                 if (qp[i] == NULL) {
3046                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3047                         continue;
3048                 }
3049
3050                 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3051                 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3052                 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3053                 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3054         }
3055
3056         ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3057                                       &counters);
3058         if (ret) {
3059                 DPAA2_SEC_ERR("SEC counters failed");
3060         } else {
3061                 DPAA2_SEC_INFO("dpseci hardware stats:"
3062                             "\n\tNum of Requests Dequeued = %" PRIu64
3063                             "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3064                             "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3065                             "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3066                             "\n\tNum of Outbound Bytes Protected = %" PRIu64
3067                             "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3068                             "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3069                             counters.dequeued_requests,
3070                             counters.ob_enc_requests,
3071                             counters.ib_dec_requests,
3072                             counters.ob_enc_bytes,
3073                             counters.ob_prot_bytes,
3074                             counters.ib_dec_bytes,
3075                             counters.ib_valid_bytes);
3076         }
3077 }
3078
3079 static
3080 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3081 {
3082         int i;
3083         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3084                                    (dev->data->queue_pairs);
3085
3086         PMD_INIT_FUNC_TRACE();
3087
3088         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3089                 if (qp[i] == NULL) {
3090                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3091                         continue;
3092                 }
3093                 qp[i]->tx_vq.rx_pkts = 0;
3094                 qp[i]->tx_vq.tx_pkts = 0;
3095                 qp[i]->tx_vq.err_pkts = 0;
3096                 qp[i]->rx_vq.rx_pkts = 0;
3097                 qp[i]->rx_vq.tx_pkts = 0;
3098                 qp[i]->rx_vq.err_pkts = 0;
3099         }
3100 }
3101
/* Event dequeue callback for rx queues attached with
 * RTE_SCHED_TYPE_PARALLEL: convert a SEC frame descriptor into an
 * rte_event and release the DQRR entry immediately (parallel queues
 * need no atomic-context bookkeeping).
 */
static void __attribute__((hot))
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Event metadata was recorded on the rx queue at
	 * dpaa2_sec_eventq_attach() time; copy it field by field.
	 */
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	/* Payload is the completed crypto op parsed out of the FD. */
	ev->event_ptr = sec_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}
3127 static void
3128 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3129                                  const struct qbman_fd *fd,
3130                                  const struct qbman_result *dq,
3131                                  struct dpaa2_queue *rxq,
3132                                  struct rte_event *ev)
3133 {
3134         uint8_t dqrr_index;
3135         struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3136         /* Prefetching mbuf */
3137         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3138                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3139
3140         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3141         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3142
3143         ev->flow_id = rxq->ev.flow_id;
3144         ev->sub_event_type = rxq->ev.sub_event_type;
3145         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3146         ev->op = RTE_EVENT_OP_NEW;
3147         ev->sched_type = rxq->ev.sched_type;
3148         ev->queue_id = rxq->ev.queue_id;
3149         ev->priority = rxq->ev.priority;
3150
3151         ev->event_ptr = sec_fd_to_mbuf(fd);
3152         dqrr_index = qbman_get_dqrr_idx(dq);
3153         crypto_op->sym->m_src->seqn = dqrr_index + 1;
3154         DPAA2_PER_LCORE_DQRR_SIZE++;
3155         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3156         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3157 }
3158
3159 int
3160 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3161                 int qp_id,
3162                 uint16_t dpcon_id,
3163                 const struct rte_event *event)
3164 {
3165         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3166         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3167         struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3168         struct dpseci_rx_queue_cfg cfg;
3169         int ret;
3170
3171         if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3172                 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3173         else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3174                 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3175         else
3176                 return -EINVAL;
3177
3178         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3179         cfg.options = DPSECI_QUEUE_OPT_DEST;
3180         cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3181         cfg.dest_cfg.dest_id = dpcon_id;
3182         cfg.dest_cfg.priority = event->priority;
3183
3184         cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3185         cfg.user_ctx = (size_t)(qp);
3186         if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3187                 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3188                 cfg.order_preservation_en = 1;
3189         }
3190         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3191                                   qp_id, &cfg);
3192         if (ret) {
3193                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3194                 return ret;
3195         }
3196
3197         memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3198
3199         return 0;
3200 }
3201
3202 int
3203 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3204                         int qp_id)
3205 {
3206         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3207         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3208         struct dpseci_rx_queue_cfg cfg;
3209         int ret;
3210
3211         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3212         cfg.options = DPSECI_QUEUE_OPT_DEST;
3213         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3214
3215         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3216                                   qp_id, &cfg);
3217         if (ret)
3218                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3219
3220         return ret;
3221 }
3222
/* Cryptodev operation table exported to the generic rte_cryptodev layer.
 * Session ops cover the plain symmetric-crypto path; rte_security
 * sessions are handled separately through dpaa2_sec_security_ops.
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
3238
3239 static const struct rte_security_capability *
3240 dpaa2_sec_capabilities_get(void *device __rte_unused)
3241 {
3242         return dpaa2_sec_security_cap;
3243 }
3244
/* rte_security operation table. Only session create/destroy and the
 * capability query are implemented; update, per-session stats and
 * inline metadata are not supported by this PMD (left NULL).
 */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
3253
3254 static int
3255 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3256 {
3257         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3258
3259         rte_free(dev->security_ctx);
3260
3261         rte_mempool_free(internals->fle_pool);
3262
3263         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3264                        dev->data->name, rte_socket_id());
3265
3266         return 0;
3267 }
3268
3269 static int
3270 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3271 {
3272         struct dpaa2_sec_dev_private *internals;
3273         struct rte_device *dev = cryptodev->device;
3274         struct rte_dpaa2_device *dpaa2_dev;
3275         struct rte_security_ctx *security_instance;
3276         struct fsl_mc_io *dpseci;
3277         uint16_t token;
3278         struct dpseci_attr attr;
3279         int retcode, hw_id;
3280         char str[30];
3281
3282         PMD_INIT_FUNC_TRACE();
3283         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3284         if (dpaa2_dev == NULL) {
3285                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3286                 return -1;
3287         }
3288         hw_id = dpaa2_dev->object_id;
3289
3290         cryptodev->driver_id = cryptodev_driver_id;
3291         cryptodev->dev_ops = &crypto_ops;
3292
3293         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3294         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3295         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3296                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3297                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3298                         RTE_CRYPTODEV_FF_SECURITY |
3299                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3300                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3301                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3302                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3303                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3304
3305         internals = cryptodev->data->dev_private;
3306
3307         /*
3308          * For secondary processes, we don't initialise any further as primary
3309          * has already done this work. Only check we don't need a different
3310          * RX function
3311          */
3312         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3313                 DPAA2_SEC_DEBUG("Device already init by primary process");
3314                 return 0;
3315         }
3316
3317         /* Initialize security_ctx only for primary process*/
3318         security_instance = rte_malloc("rte_security_instances_ops",
3319                                 sizeof(struct rte_security_ctx), 0);
3320         if (security_instance == NULL)
3321                 return -ENOMEM;
3322         security_instance->device = (void *)cryptodev;
3323         security_instance->ops = &dpaa2_sec_security_ops;
3324         security_instance->sess_cnt = 0;
3325         cryptodev->security_ctx = security_instance;
3326
3327         /*Open the rte device via MC and save the handle for further use*/
3328         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3329                                 sizeof(struct fsl_mc_io), 0);
3330         if (!dpseci) {
3331                 DPAA2_SEC_ERR(
3332                         "Error in allocating the memory for dpsec object");
3333                 return -1;
3334         }
3335         dpseci->regs = rte_mcp_ptr_list[0];
3336
3337         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3338         if (retcode != 0) {
3339                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3340                               retcode);
3341                 goto init_error;
3342         }
3343         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3344         if (retcode != 0) {
3345                 DPAA2_SEC_ERR(
3346                              "Cannot get dpsec device attributed: Error = %x",
3347                              retcode);
3348                 goto init_error;
3349         }
3350         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3351                         "dpsec-%u", hw_id);
3352
3353         internals->max_nb_queue_pairs = attr.num_tx_queues;
3354         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3355         internals->hw = dpseci;
3356         internals->token = token;
3357
3358         snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3359                         getpid(), cryptodev->data->dev_id);
3360         internals->fle_pool = rte_mempool_create((const char *)str,
3361                         FLE_POOL_NUM_BUFS,
3362                         FLE_POOL_BUF_SIZE,
3363                         FLE_POOL_CACHE_SIZE, 0,
3364                         NULL, NULL, NULL, NULL,
3365                         SOCKET_ID_ANY, 0);
3366         if (!internals->fle_pool) {
3367                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3368                 goto init_error;
3369         }
3370
3371         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3372         return 0;
3373
3374 init_error:
3375         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3376
3377         /* dpaa2_sec_uninit(crypto_dev_name); */
3378         return -EFAULT;
3379 }
3380
3381 static int
3382 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3383                           struct rte_dpaa2_device *dpaa2_dev)
3384 {
3385         struct rte_cryptodev *cryptodev;
3386         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3387
3388         int retval;
3389
3390         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3391                         dpaa2_dev->object_id);
3392
3393         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3394         if (cryptodev == NULL)
3395                 return -ENOMEM;
3396
3397         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3398                 cryptodev->data->dev_private = rte_zmalloc_socket(
3399                                         "cryptodev private structure",
3400                                         sizeof(struct dpaa2_sec_dev_private),
3401                                         RTE_CACHE_LINE_SIZE,
3402                                         rte_socket_id());
3403
3404                 if (cryptodev->data->dev_private == NULL)
3405                         rte_panic("Cannot allocate memzone for private "
3406                                   "device data");
3407         }
3408
3409         dpaa2_dev->cryptodev = cryptodev;
3410         cryptodev->device = &dpaa2_dev->device;
3411
3412         /* init user callbacks */
3413         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3414
3415         /* Invoke PMD device initialization function */
3416         retval = dpaa2_sec_dev_init(cryptodev);
3417         if (retval == 0)
3418                 return 0;
3419
3420         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3421                 rte_free(cryptodev->data->dev_private);
3422
3423         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3424
3425         return -ENXIO;
3426 }
3427
3428 static int
3429 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3430 {
3431         struct rte_cryptodev *cryptodev;
3432         int ret;
3433
3434         cryptodev = dpaa2_dev->cryptodev;
3435         if (cryptodev == NULL)
3436                 return -ENODEV;
3437
3438         ret = dpaa2_sec_uninit(cryptodev);
3439         if (ret)
3440                 return ret;
3441
3442         return rte_cryptodev_pmd_destroy(cryptodev);
3443 }
3444
/* fslmc bus driver descriptor: matches DPAA2_CRYPTO (DPSECI) objects
 * and requires IOVA-as-VA addressing.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};
3454
/* Per-driver handle used by the cryptodev framework registration. */
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the fslmc bus and with the cryptodev framework, which
 * assigns cryptodev_driver_id for this driver.
 */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3460
/* Constructor: register the "pmd.crypto.dpaa2" log type and default
 * its level to NOTICE before any driver code runs.
 */
RTE_INIT(dpaa2_sec_init_log)
{
	/* Bus level logs */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}