31b7de679e8108279c2df8470607a7913fe977fd
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <rte_cryptodev.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_cryptodev_pmd.h>
20 #include <rte_common.h>
21 #include <rte_fslmc.h>
22 #include <fslmc_vfio.h>
23 #include <dpaa2_hw_pvt.h>
24 #include <dpaa2_hw_dpio.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <fsl_dpopr.h>
27 #include <fsl_dpseci.h>
28 #include <fsl_mc_sys.h>
29
30 #include "dpaa2_sec_priv.h"
31 #include "dpaa2_sec_event.h"
32 #include "dpaa2_sec_logs.h"
33
/* Required types */
typedef uint64_t        dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a oneword job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
/* Freescale/NXP SEC engine identifiers */
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS       32000
/* Size of one element of the frame-list-entry mempool; every non-SG build
 * routine memsets exactly this many bytes after rte_mempool_get()
 */
#define FLE_POOL_BUF_SIZE       256
#define FLE_POOL_CACHE_SIZE     512
/* Size of the rte_malloc'd area used by the scatter/gather build routines */
#define FLE_SG_MEM_SIZE         2048
/* Data head-room adjustment values programmed into the flow context */
#define SEC_FLC_DHR_OUTBOUND    -114
#define SEC_FLC_DHR_INBOUND     0

/* SEC hardware era used by the RTA descriptor-building library */
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

/* Driver id assigned by the cryptodev library at registration */
static uint8_t cryptodev_driver_id;

/* Log type id for this PMD's log macros */
int dpaa2_logtype_sec;
/*
 * Build a compound frame descriptor (FD) for a protocol-offload operation
 * with distinct input and output buffers (in place when m_dst is NULL).
 *
 * A 3-entry frame list is taken from the session's FLE pool:
 *   fle[0] - stashes the crypto op pointer and session ctxt so the dequeue
 *            path can recover them from the FD address
 *   fle[1] - output FLE, covering the whole dst mbuf buffer
 *   fle[2] - input FLE, covering the src packet data
 *
 * Returns 0 on success, -1 if the FLE pool is exhausted.
 */
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	/* No separate destination: operate in place on the source mbuf */
	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		/* valid buffer pool id: tag FD and FLEs with it */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		/* no hw pool: mark invalid-pool so buffers are not freed to hw */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data; the whole buf_len is
	 * offered since protocol output may grow the packet
	 */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

#ifdef ENABLE_HFN_OVERRIDE
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		/* enable HFN override for PDCP sessions */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
	}
#endif

	return 0;

}
138
/*
 * Build a simple (non-compound) FD for an in-place protocol-offload
 * operation on m_src.  Out-of-place requests (m_dst set) are delegated to
 * build_proto_compound_fd(), which needs separate input/output FLEs.
 *
 * Returns 0 on success; a negative value can only come from the delegated
 * compound path (FLE pool exhaustion).
 */
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf: buf_iova is borrowed to carry the
	 * op pointer through the hardware; the original IOVA is parked in
	 * aead.digest.phys_addr.  NOTE(review): presumably restored on
	 * dequeue -- confirm in the dequeue path (outside this view).
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
171
/*
 * Build a compound FD with scatter/gather frame lists for an AEAD (GCM)
 * operation on possibly multi-segment mbufs.
 *
 * Layout of the rte_malloc'd FLE_SG_MEM_SIZE arena:
 *   fle[0]  - stashes the op pointer and session ctxt for the dequeue path
 *   fle[1]  - output frame-list entry (SG extension)
 *   fle[2]  - input frame-list entry (SG extension)
 *   fle[3..]- SG entries: output chain first, then input chain; for decrypt
 *             a copy of the received ICV is parked just past the last SGE.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* output lands in m_dst when provided, else in place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* encrypt also produces the ICV, so output is icv_len longer */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap; the offset is padded so the
	 * AAD region stays 16-byte aligned
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* last output SGE must not cover the trailing ICV area */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		/* extra SGE so hardware writes the fresh ICV to digest.data */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* decrypt input additionally carries the received ICV for checking */
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		/* AAD gets its own SGE ahead of the payload */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV just past the SG table so it stays
		 * valid while the op is in flight
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
318
/*
 * Build a compound FD for an AEAD (GCM) operation on single-segment mbufs.
 *
 * A fixed-size element from the session FLE pool holds:
 *   fle[0]  - op pointer + session ctxt stash for the dequeue path
 *   fle[1]  - output FLE (SG extension), fle[2] - input FLE (SG extension)
 *   then the SG entries (output chain, then input chain); for decrypt a
 *   copy of the received ICV is parked just past the last SGE used.
 *
 * Returns 0 on success, -1 on FLE pool exhaustion.
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* output lands in m_dst when provided, else in place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		/* valid pool: tag FD, both FLEs and all four SGEs */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* no hw pool: mark everything invalid-pool */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* encrypt also produces the ICV, so output is icv_len longer */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap; the offset is padded so the
	 * AAD region stays 16-byte aligned
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		/* extra SGE so hardware writes the fresh ICV to digest.data */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* decrypt input additionally carries the received ICV for checking */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		/* AAD gets its own SGE ahead of the payload */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV just past the last SGE so it stays
		 * valid while the op is in flight
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
466
/*
 * Build a compound FD with scatter/gather frame lists for a chained
 * cipher+auth operation on possibly multi-segment mbufs.
 *
 * auth_only_len is the auth-covered region that is not ciphered (auth range
 * minus cipher range).  Arena layout in the rte_malloc'd buffer mirrors
 * build_authenc_gcm_sg_fd(): fle[0] op/ctxt stash, fle[1] output FLE,
 * fle[2] input FLE, fle[3..] SG entries (output chain then input chain).
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* output lands in m_dst when provided, else in place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* generate-digest direction also emits the ICV */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* last output SGE must not cover the trailing ICV area */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		/* extra SGE so hardware writes the digest to digest.data */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* verify direction additionally feeds the received ICV */
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* trailing ICV in the packet is not part of the auth input here */
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV just past the SG table so it stays
		 * valid while the op is in flight
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
611
/*
 * Build a compound FD for a chained cipher+auth operation on single-segment
 * mbufs, using a fixed-size element from the session FLE pool.
 *
 * auth_only_len is the auth-covered region that is not ciphered.  Element
 * layout: fle[0] op/ctxt stash, fle[1] output FLE, fle[2] input FLE, then
 * the SG entries; for verify a copy of the received ICV is parked just past
 * the last SGE used.
 *
 * Returns 0 on success, -1 on FLE pool exhaustion.
 */
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* output lands in m_dst when provided, else in place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		/* valid pool: tag FD, both FLEs and all four SGEs */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* no hw pool: mark everything invalid-pool */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* generate-digest direction also emits the ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		/* extra SGE so hardware writes the digest to digest.data */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* verify direction additionally feeds the received ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* copy the received ICV just past the last SGE so it stays
		 * valid while the op is in flight
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
753
/*
 * Build a compound FD with a scatter/gather input frame list for an
 * auth-only operation on possibly multi-segment mbufs.
 *
 * Output FLE points directly at digest.data (digest write-back); the input
 * FLE is an SG chain over the source segments, plus (for verification) a
 * copy of the received digest parked just past the SG table.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* auth-only uses the INITFINAL flow context, not flc_desc[0] */
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle: hardware writes the digest straight to digest.data */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case: trailing digest area in the last
		 * segment is excluded from the auth input
		 */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case: append a stable copy of the
		 * received digest (parked past the SG table) to the input
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
833
/*
 * Build a compound FD for an authentication-only operation on a
 * contiguous (single-segment) mbuf. FLEs come from the per-device FLE
 * mempool (buffers of FLE_POOL_BUF_SIZE bytes; released back to the pool
 * on completion).
 *
 * Layout: fle[0] bookkeeping (op + session ctxt), fle[1] output FLE
 * (digest), fle[2] input FLE; in the verify direction the input FLE is
 * SG-extended over {data, expected-digest copy}.
 *
 * Returns 0 on success, -1 if no FLE buffer is available.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	/* Propagate the buffer-pool id (or "invalid pool" marker) so the
	 * hardware knows how the buffers are owned.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Output FLE: SEC writes the computed digest here */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Digest calculation: input FLE points straight at the
		 * authenticated region of the source mbuf.
		 */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification: build a 2-entry SG input of
		 * {data, copy of the expected digest} for SEC to compare.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		/* Stash the expected digest in spare space after the SG list */
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
923
/*
 * Build a compound FD for a cipher-only operation on scatter-gather
 * (multi-segment) mbufs, supporting out-of-place operation
 * (m_dst != NULL writes ciphertext/plaintext to a different mbuf chain).
 *
 * FLE memory layout (rte_malloc'd here; released with rte_free on
 * completion):
 *   fle[0]   - bookkeeping only: crypto op pointer + session context
 *   fle[1]   - output FLE (op_fle): SG list over destination segments
 *   fle[2]   - input FLE (ip_fle): SG list of IV followed by source segs
 *   fle[3..] - the SG entries themselves (output entries first, then
 *              input entries)
 *
 * Input length covers IV + data; output length covers data only.
 * Returns 0 on success, -1 if the FLE memory allocation fails.
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if a distinct destination mbuf was supplied */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg: cipher region starts at data.offset within segment 0 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs: remaining destination segments are covered in full */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: input SG list begins right after the output entries */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV: first input entry carries the IV from the op's private area */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd: FD references the output FLE; length = input (IV + data) */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1042
/*
 * Build a compound FD for a cipher-only operation on a contiguous
 * (single-segment) mbuf; supports out-of-place via m_dst. FLEs come from
 * the per-device FLE mempool (released back to the pool on completion).
 *
 * Layout: fle[0] bookkeeping (op + session ctxt), fle[1] output FLE
 * (destination data), fle[2] input FLE, SG-extended over {IV, src data}.
 *
 * Returns 0 on success, -1 if no FLE buffer is available.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if a distinct destination mbuf was supplied */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Propagate the buffer-pool id (or "invalid pool" marker) on the FD
	 * and every FLE/SG entry.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: destination buffer at the cipher data offset */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: SG-extended list of {IV, source data} */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1150
1151 static inline int
1152 build_sec_fd(struct rte_crypto_op *op,
1153              struct qbman_fd *fd, uint16_t bpid)
1154 {
1155         int ret = -1;
1156         dpaa2_sec_session *sess;
1157
1158         PMD_INIT_FUNC_TRACE();
1159
1160         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1161                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1162                                 op->sym->session, cryptodev_driver_id);
1163         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1164                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1165                                 op->sym->sec_session);
1166         else
1167                 return -1;
1168
1169         /* Segmented buffer */
1170         if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1171                 switch (sess->ctxt_type) {
1172                 case DPAA2_SEC_CIPHER:
1173                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1174                         break;
1175                 case DPAA2_SEC_AUTH:
1176                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1177                         break;
1178                 case DPAA2_SEC_AEAD:
1179                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1180                         break;
1181                 case DPAA2_SEC_CIPHER_HASH:
1182                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1183                         break;
1184                 case DPAA2_SEC_HASH_CIPHER:
1185                 default:
1186                         DPAA2_SEC_ERR("error: Unsupported session");
1187                 }
1188         } else {
1189                 switch (sess->ctxt_type) {
1190                 case DPAA2_SEC_CIPHER:
1191                         ret = build_cipher_fd(sess, op, fd, bpid);
1192                         break;
1193                 case DPAA2_SEC_AUTH:
1194                         ret = build_auth_fd(sess, op, fd, bpid);
1195                         break;
1196                 case DPAA2_SEC_AEAD:
1197                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1198                         break;
1199                 case DPAA2_SEC_CIPHER_HASH:
1200                         ret = build_authenc_fd(sess, op, fd, bpid);
1201                         break;
1202                 case DPAA2_SEC_IPSEC:
1203                         ret = build_proto_fd(sess, op, fd, bpid);
1204                         break;
1205                 case DPAA2_SEC_PDCP:
1206                         ret = build_proto_compound_fd(sess, op, fd, bpid);
1207                         break;
1208                 case DPAA2_SEC_HASH_CIPHER:
1209                 default:
1210                         DPAA2_SEC_ERR("error: Unsupported session");
1211                 }
1212         }
1213         return ret;
1214 }
1215
1216 static uint16_t
1217 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1218                         uint16_t nb_ops)
1219 {
1220         /* Function to transmit the frames to given device and VQ*/
1221         uint32_t loop;
1222         int32_t ret;
1223         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1224         uint32_t frames_to_send;
1225         struct qbman_eq_desc eqdesc;
1226         struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1227         struct qbman_swp *swp;
1228         uint16_t num_tx = 0;
1229         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1230         /*todo - need to support multiple buffer pools */
1231         uint16_t bpid;
1232         struct rte_mempool *mb_pool;
1233
1234         if (unlikely(nb_ops == 0))
1235                 return 0;
1236
1237         if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1238                 DPAA2_SEC_ERR("sessionless crypto op not supported");
1239                 return 0;
1240         }
1241         /*Prepare enqueue descriptor*/
1242         qbman_eq_desc_clear(&eqdesc);
1243         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1244         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1245         qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1246
1247         if (!DPAA2_PER_LCORE_DPIO) {
1248                 ret = dpaa2_affine_qbman_swp();
1249                 if (ret) {
1250                         DPAA2_SEC_ERR("Failure in affining portal");
1251                         return 0;
1252                 }
1253         }
1254         swp = DPAA2_PER_LCORE_PORTAL;
1255
1256         while (nb_ops) {
1257                 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1258                         dpaa2_eqcr_size : nb_ops;
1259
1260                 for (loop = 0; loop < frames_to_send; loop++) {
1261                         if ((*ops)->sym->m_src->seqn) {
1262                          uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1263
1264                          flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1265                          DPAA2_PER_LCORE_DQRR_SIZE--;
1266                          DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1267                          (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1268                         }
1269
1270                         /*Clear the unused FD fields before sending*/
1271                         memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1272                         mb_pool = (*ops)->sym->m_src->pool;
1273                         bpid = mempool_to_bpid(mb_pool);
1274                         ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1275                         if (ret) {
1276                                 DPAA2_SEC_ERR("error: Improper packet contents"
1277                                               " for crypto operation");
1278                                 goto skip_tx;
1279                         }
1280                         ops++;
1281                 }
1282                 loop = 0;
1283                 while (loop < frames_to_send) {
1284                         loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1285                                                         &fd_arr[loop],
1286                                                         &flags[loop],
1287                                                         frames_to_send - loop);
1288                 }
1289
1290                 num_tx += frames_to_send;
1291                 nb_ops -= frames_to_send;
1292         }
1293 skip_tx:
1294         dpaa2_qp->tx_vq.tx_pkts += num_tx;
1295         dpaa2_qp->tx_vq.err_pkts += nb_ops;
1296         return num_tx;
1297 }
1298
/*
 * Recover the crypto op from a *simple* (non-compound) FD, as returned
 * for protocol-offload sessions.
 *
 * The op pointer is recovered from mbuf->buf_iova, and the mbuf's real
 * buf_iova restored from aead.digest.phys_addr -- presumably the inverse
 * of a stash done by the proto FD builder; confirm against
 * build_proto_fd (not in view here).
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	/* Locate the inline mbuf header preceding the FD's buffer */
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Adjust the mbuf lengths by however much SEC grew/shrank the frame */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Undo the pointer stash: buf_iova held the op, digest held buf_iova */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	/* Compensate for the data-head-room adjustment applied by SEC
	 * (SEC_FLC_DHR_OUTBOUND/INBOUND constants defined at top of file).
	 */
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
1327
/*
 * Convert a completed FD back to its crypto op and release the FLE
 * resources the enqueue path allocated.
 *
 * For simple-format FDs this delegates to sec_simple_fd_to_mbuf().
 * For compound FDs the op pointer is read back from the bookkeeping FLE
 * at (fle - 1), matching the layout set up by the build_*_fd functions.
 *
 * Returns the op, or NULL for a non-inline (IVP) buffer, which is not
 * supported here.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* For IPsec offload, SEC rewrote the whole frame; propagate the
	 * resulting frame length to the destination mbuf.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: contiguous ops used the FLE mempool,
	 * SG ops used rte_malloc (see the build_*_fd / build_*_sg_fd pairs)
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1397
/*
 * Dequeue completed crypto ops from the SEC queue pair.
 *
 * Issues one volatile (pull) dequeue command for up to min(nb_ops,
 * dpaa2_dqrr_size) frames, then spins on the DQ storage entries until
 * QBMAN marks the pull complete, converting each FD back to its op.
 *
 * Returns the number of ops written into ops[].
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		/* NOTE(review): sec_fd_to_mbuf() can return NULL for a
		 * non-inline (IVP) buffer; the status assignments below would
		 * then dereference NULL -- TODO confirm this path cannot occur.
		 */
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1496
1497 /** Release queue pair */
1498 static int
1499 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1500 {
1501         struct dpaa2_sec_qp *qp =
1502                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1503
1504         PMD_INIT_FUNC_TRACE();
1505
1506         if (qp->rx_vq.q_storage) {
1507                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1508                 rte_free(qp->rx_vq.q_storage);
1509         }
1510         rte_free(qp);
1511
1512         dev->data->queue_pairs[queue_pair_id] = NULL;
1513
1514         return 0;
1515 }
1516
1517 /** Setup a queue pair */
1518 static int
1519 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1520                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1521                 __rte_unused int socket_id)
1522 {
1523         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1524         struct dpaa2_sec_qp *qp;
1525         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1526         struct dpseci_rx_queue_cfg cfg;
1527         int32_t retcode;
1528
1529         PMD_INIT_FUNC_TRACE();
1530
1531         /* If qp is already in use free ring memory and qp metadata. */
1532         if (dev->data->queue_pairs[qp_id] != NULL) {
1533                 DPAA2_SEC_INFO("QP already setup");
1534                 return 0;
1535         }
1536
1537         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1538                     dev, qp_id, qp_conf);
1539
1540         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1541
1542         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1543                         RTE_CACHE_LINE_SIZE);
1544         if (!qp) {
1545                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1546                 return -1;
1547         }
1548
1549         qp->rx_vq.dev = dev;
1550         qp->tx_vq.dev = dev;
1551         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1552                 sizeof(struct queue_storage_info_t),
1553                 RTE_CACHE_LINE_SIZE);
1554         if (!qp->rx_vq.q_storage) {
1555                 DPAA2_SEC_ERR("malloc failed for q_storage");
1556                 return -1;
1557         }
1558         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1559
1560         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1561                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1562                 return -1;
1563         }
1564
1565         dev->data->queue_pairs[qp_id] = qp;
1566
1567         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1568         cfg.user_ctx = (size_t)(&qp->rx_vq);
1569         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1570                                       qp_id, &cfg);
1571         return retcode;
1572 }
1573
1574 /** Return the number of allocated queue pairs */
1575 static uint32_t
1576 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1577 {
1578         PMD_INIT_FUNC_TRACE();
1579
1580         return dev->data->nb_queue_pairs;
1581 }
1582
/** Returns the size of the dpaa2_sec session structure
 * (comment fixed: this is the DPAA2 SEC driver, not aesni_gcm)
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
1591
1592 static int
1593 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1594                       struct rte_crypto_sym_xform *xform,
1595                       dpaa2_sec_session *session)
1596 {
1597         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1598         struct alginfo cipherdata;
1599         int bufsize, i;
1600         struct ctxt_priv *priv;
1601         struct sec_flow_context *flc;
1602
1603         PMD_INIT_FUNC_TRACE();
1604
1605         /* For SEC CIPHER only one descriptor is required. */
1606         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1607                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1608                         RTE_CACHE_LINE_SIZE);
1609         if (priv == NULL) {
1610                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1611                 return -1;
1612         }
1613
1614         priv->fle_pool = dev_priv->fle_pool;
1615
1616         flc = &priv->flc_desc[0].flc;
1617
1618         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1619                         RTE_CACHE_LINE_SIZE);
1620         if (session->cipher_key.data == NULL) {
1621                 DPAA2_SEC_ERR("No Memory for cipher key");
1622                 rte_free(priv);
1623                 return -1;
1624         }
1625         session->cipher_key.length = xform->cipher.key.length;
1626
1627         memcpy(session->cipher_key.data, xform->cipher.key.data,
1628                xform->cipher.key.length);
1629         cipherdata.key = (size_t)session->cipher_key.data;
1630         cipherdata.keylen = session->cipher_key.length;
1631         cipherdata.key_enc_flags = 0;
1632         cipherdata.key_type = RTA_DATA_IMM;
1633
1634         /* Set IV parameters */
1635         session->iv.offset = xform->cipher.iv.offset;
1636         session->iv.length = xform->cipher.iv.length;
1637
1638         switch (xform->cipher.algo) {
1639         case RTE_CRYPTO_CIPHER_AES_CBC:
1640                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1641                 cipherdata.algmode = OP_ALG_AAI_CBC;
1642                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1643                 break;
1644         case RTE_CRYPTO_CIPHER_3DES_CBC:
1645                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1646                 cipherdata.algmode = OP_ALG_AAI_CBC;
1647                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1648                 break;
1649         case RTE_CRYPTO_CIPHER_AES_CTR:
1650                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1651                 cipherdata.algmode = OP_ALG_AAI_CTR;
1652                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1653                 break;
1654         case RTE_CRYPTO_CIPHER_3DES_CTR:
1655         case RTE_CRYPTO_CIPHER_AES_ECB:
1656         case RTE_CRYPTO_CIPHER_3DES_ECB:
1657         case RTE_CRYPTO_CIPHER_AES_XTS:
1658         case RTE_CRYPTO_CIPHER_AES_F8:
1659         case RTE_CRYPTO_CIPHER_ARC4:
1660         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1661         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1662         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1663         case RTE_CRYPTO_CIPHER_NULL:
1664                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1665                         xform->cipher.algo);
1666                 goto error_out;
1667         default:
1668                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1669                         xform->cipher.algo);
1670                 goto error_out;
1671         }
1672         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1673                                 DIR_ENC : DIR_DEC;
1674
1675         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1676                                         &cipherdata, NULL, session->iv.length,
1677                                         session->dir);
1678         if (bufsize < 0) {
1679                 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1680                 goto error_out;
1681         }
1682
1683         flc->word1_sdl = (uint8_t)bufsize;
1684         session->ctxt = priv;
1685
1686         for (i = 0; i < bufsize; i++)
1687                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1688
1689         return 0;
1690
1691 error_out:
1692         rte_free(session->cipher_key.data);
1693         rte_free(priv);
1694         return -1;
1695 }
1696
1697 static int
1698 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1699                     struct rte_crypto_sym_xform *xform,
1700                     dpaa2_sec_session *session)
1701 {
1702         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1703         struct alginfo authdata;
1704         int bufsize, i;
1705         struct ctxt_priv *priv;
1706         struct sec_flow_context *flc;
1707
1708         PMD_INIT_FUNC_TRACE();
1709
1710         /* For SEC AUTH three descriptors are required for various stages */
1711         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1712                         sizeof(struct ctxt_priv) + 3 *
1713                         sizeof(struct sec_flc_desc),
1714                         RTE_CACHE_LINE_SIZE);
1715         if (priv == NULL) {
1716                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1717                 return -1;
1718         }
1719
1720         priv->fle_pool = dev_priv->fle_pool;
1721         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1722
1723         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1724                         RTE_CACHE_LINE_SIZE);
1725         if (session->auth_key.data == NULL) {
1726                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1727                 rte_free(priv);
1728                 return -1;
1729         }
1730         session->auth_key.length = xform->auth.key.length;
1731
1732         memcpy(session->auth_key.data, xform->auth.key.data,
1733                xform->auth.key.length);
1734         authdata.key = (size_t)session->auth_key.data;
1735         authdata.keylen = session->auth_key.length;
1736         authdata.key_enc_flags = 0;
1737         authdata.key_type = RTA_DATA_IMM;
1738
1739         session->digest_length = xform->auth.digest_length;
1740
1741         switch (xform->auth.algo) {
1742         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1743                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1744                 authdata.algmode = OP_ALG_AAI_HMAC;
1745                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1746                 break;
1747         case RTE_CRYPTO_AUTH_MD5_HMAC:
1748                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1749                 authdata.algmode = OP_ALG_AAI_HMAC;
1750                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1751                 break;
1752         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1753                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1754                 authdata.algmode = OP_ALG_AAI_HMAC;
1755                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1756                 break;
1757         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1758                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1759                 authdata.algmode = OP_ALG_AAI_HMAC;
1760                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1761                 break;
1762         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1763                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1764                 authdata.algmode = OP_ALG_AAI_HMAC;
1765                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1766                 break;
1767         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1768                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1769                 authdata.algmode = OP_ALG_AAI_HMAC;
1770                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1771                 break;
1772         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1773         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1774         case RTE_CRYPTO_AUTH_NULL:
1775         case RTE_CRYPTO_AUTH_SHA1:
1776         case RTE_CRYPTO_AUTH_SHA256:
1777         case RTE_CRYPTO_AUTH_SHA512:
1778         case RTE_CRYPTO_AUTH_SHA224:
1779         case RTE_CRYPTO_AUTH_SHA384:
1780         case RTE_CRYPTO_AUTH_MD5:
1781         case RTE_CRYPTO_AUTH_AES_GMAC:
1782         case RTE_CRYPTO_AUTH_KASUMI_F9:
1783         case RTE_CRYPTO_AUTH_AES_CMAC:
1784         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1785         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1786                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1787                               xform->auth.algo);
1788                 goto error_out;
1789         default:
1790                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1791                               xform->auth.algo);
1792                 goto error_out;
1793         }
1794         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1795                                 DIR_ENC : DIR_DEC;
1796
1797         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1798                                    1, 0, SHR_NEVER, &authdata, !session->dir,
1799                                    session->digest_length);
1800         if (bufsize < 0) {
1801                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1802                 goto error_out;
1803         }
1804
1805         flc->word1_sdl = (uint8_t)bufsize;
1806         session->ctxt = priv;
1807         for (i = 0; i < bufsize; i++)
1808                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1809                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1810
1811
1812         return 0;
1813
1814 error_out:
1815         rte_free(session->auth_key.data);
1816         rte_free(priv);
1817         return -1;
1818 }
1819
/*
 * Initialize an AEAD session (AES-GCM supported; AES-CCM rejected): copy
 * the key, decide whether the key can live inline in the descriptor, and
 * build the GCM encap or decap shared descriptor into the flow context.
 *
 * Returns 0 on success, -1 on failure (all allocations released on error).
 */
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	/* Keep a private copy of the AEAD key; NULL is OK for zero length */
	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	/* AAD bytes are authenticated but not encrypted */
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (size_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		/* CCM is not supported on this path (see the IPsec variant) */
		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
			      aead_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/*
	 * Ask RTA whether the key fits inline in the descriptor.  desc[0]
	 * carries the key length in, desc[1] receives a per-key inline bit;
	 * both words are scratch and are cleared again before the build.
	 */
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		/* Key fits inline: embed it immediately in the descriptor */
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		/* Too large: reference the key by its IOVA instead */
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	/* Build the GCM shared descriptor for the session direction */
	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
				&aeaddata, session->iv.length,
				session->digest_length);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
1937
1938
/*
 * Initialize a chained cipher+auth session.  The xform order together
 * with the cipher direction decides the context type (CIPHER_HASH vs
 * HASH_CIPHER); only cipher-then-hash (encrypt side) is actually
 * supported by the descriptor builder below.  Copies both keys, decides
 * inline-vs-pointer placement for each, and builds the authenc shared
 * descriptor into the flow context.
 *
 * Returns 0 on success, -1 on failure (all allocations released on error).
 */
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	/*
	 * auth_cipher_text was set by the caller from the xform order:
	 * true means the cipher xform came first (cipher, then auth).
	 */
	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	/* Private copies of both keys; NULL is OK for zero-length keys */
	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	/* Map the auth algorithm onto SEC algtype/algmode (HMAC only) */
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Map the cipher algorithm onto SEC algtype/algmode */
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/*
	 * Ask RTA whether each key fits inline in the descriptor.  desc[0]
	 * and desc[1] carry the two key lengths in, desc[2] receives one
	 * inline bit per key; all three words are scratch and are cleared
	 * again before the descriptor build.
	 */
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		/* Bit 0: cipher key fits inline */
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		/* Bit 1: auth key fits inline */
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, SHR_SERIAL,
					      &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
		if (bufsize < 0) {
			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
			goto error_out;
		}
	} else {
		/* Only the cipher-then-hash order has a descriptor builder */
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
2163
2164 static int
2165 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2166                             struct rte_crypto_sym_xform *xform, void *sess)
2167 {
2168         dpaa2_sec_session *session = sess;
2169
2170         PMD_INIT_FUNC_TRACE();
2171
2172         if (unlikely(sess == NULL)) {
2173                 DPAA2_SEC_ERR("Invalid session struct");
2174                 return -1;
2175         }
2176
2177         memset(session, 0, sizeof(dpaa2_sec_session));
2178         /* Default IV length = 0 */
2179         session->iv.length = 0;
2180
2181         /* Cipher Only */
2182         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2183                 session->ctxt_type = DPAA2_SEC_CIPHER;
2184                 dpaa2_sec_cipher_init(dev, xform, session);
2185
2186         /* Authentication Only */
2187         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2188                    xform->next == NULL) {
2189                 session->ctxt_type = DPAA2_SEC_AUTH;
2190                 dpaa2_sec_auth_init(dev, xform, session);
2191
2192         /* Cipher then Authenticate */
2193         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2194                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2195                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2196                 dpaa2_sec_aead_chain_init(dev, xform, session);
2197
2198         /* Authenticate then Cipher */
2199         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2200                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2201                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2202                 dpaa2_sec_aead_chain_init(dev, xform, session);
2203
2204         /* AEAD operation for AES-GCM kind of Algorithms */
2205         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2206                    xform->next == NULL) {
2207                 dpaa2_sec_aead_init(dev, xform, session);
2208
2209         } else {
2210                 DPAA2_SEC_ERR("Invalid crypto type");
2211                 return -EINVAL;
2212         }
2213
2214         return 0;
2215 }
2216
2217 static int
2218 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2219                         dpaa2_sec_session *session,
2220                         struct alginfo *aeaddata)
2221 {
2222         PMD_INIT_FUNC_TRACE();
2223
2224         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2225                                                RTE_CACHE_LINE_SIZE);
2226         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2227                 DPAA2_SEC_ERR("No Memory for aead key");
2228                 return -1;
2229         }
2230         memcpy(session->aead_key.data, aead_xform->key.data,
2231                aead_xform->key.length);
2232
2233         session->digest_length = aead_xform->digest_length;
2234         session->aead_key.length = aead_xform->key.length;
2235
2236         aeaddata->key = (size_t)session->aead_key.data;
2237         aeaddata->keylen = session->aead_key.length;
2238         aeaddata->key_enc_flags = 0;
2239         aeaddata->key_type = RTA_DATA_IMM;
2240
2241         switch (aead_xform->algo) {
2242         case RTE_CRYPTO_AEAD_AES_GCM:
2243                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2244                 aeaddata->algmode = OP_ALG_AAI_GCM;
2245                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2246                 break;
2247         case RTE_CRYPTO_AEAD_AES_CCM:
2248                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2249                 aeaddata->algmode = OP_ALG_AAI_CCM;
2250                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2251                 break;
2252         default:
2253                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2254                               aead_xform->algo);
2255                 return -1;
2256         }
2257         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2258                                 DIR_ENC : DIR_DEC;
2259
2260         return 0;
2261 }
2262
2263 static int
2264 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2265         struct rte_crypto_auth_xform *auth_xform,
2266         dpaa2_sec_session *session,
2267         struct alginfo *cipherdata,
2268         struct alginfo *authdata)
2269 {
2270         if (cipher_xform) {
2271                 session->cipher_key.data = rte_zmalloc(NULL,
2272                                                        cipher_xform->key.length,
2273                                                        RTE_CACHE_LINE_SIZE);
2274                 if (session->cipher_key.data == NULL &&
2275                                 cipher_xform->key.length > 0) {
2276                         DPAA2_SEC_ERR("No Memory for cipher key");
2277                         return -ENOMEM;
2278                 }
2279
2280                 session->cipher_key.length = cipher_xform->key.length;
2281                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2282                                 cipher_xform->key.length);
2283                 session->cipher_alg = cipher_xform->algo;
2284         } else {
2285                 session->cipher_key.data = NULL;
2286                 session->cipher_key.length = 0;
2287                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2288         }
2289
2290         if (auth_xform) {
2291                 session->auth_key.data = rte_zmalloc(NULL,
2292                                                 auth_xform->key.length,
2293                                                 RTE_CACHE_LINE_SIZE);
2294                 if (session->auth_key.data == NULL &&
2295                                 auth_xform->key.length > 0) {
2296                         DPAA2_SEC_ERR("No Memory for auth key");
2297                         return -ENOMEM;
2298                 }
2299                 session->auth_key.length = auth_xform->key.length;
2300                 memcpy(session->auth_key.data, auth_xform->key.data,
2301                                 auth_xform->key.length);
2302                 session->auth_alg = auth_xform->algo;
2303         } else {
2304                 session->auth_key.data = NULL;
2305                 session->auth_key.length = 0;
2306                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2307         }
2308
2309         authdata->key = (size_t)session->auth_key.data;
2310         authdata->keylen = session->auth_key.length;
2311         authdata->key_enc_flags = 0;
2312         authdata->key_type = RTA_DATA_IMM;
2313         switch (session->auth_alg) {
2314         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2315                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2316                 authdata->algmode = OP_ALG_AAI_HMAC;
2317                 break;
2318         case RTE_CRYPTO_AUTH_MD5_HMAC:
2319                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2320                 authdata->algmode = OP_ALG_AAI_HMAC;
2321                 break;
2322         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2323                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2324                 authdata->algmode = OP_ALG_AAI_HMAC;
2325                 break;
2326         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2327                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2328                 authdata->algmode = OP_ALG_AAI_HMAC;
2329                 break;
2330         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2331                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2332                 authdata->algmode = OP_ALG_AAI_HMAC;
2333                 break;
2334         case RTE_CRYPTO_AUTH_AES_CMAC:
2335                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2336                 break;
2337         case RTE_CRYPTO_AUTH_NULL:
2338                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2339                 break;
2340         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2341         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2342         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2343         case RTE_CRYPTO_AUTH_SHA1:
2344         case RTE_CRYPTO_AUTH_SHA256:
2345         case RTE_CRYPTO_AUTH_SHA512:
2346         case RTE_CRYPTO_AUTH_SHA224:
2347         case RTE_CRYPTO_AUTH_SHA384:
2348         case RTE_CRYPTO_AUTH_MD5:
2349         case RTE_CRYPTO_AUTH_AES_GMAC:
2350         case RTE_CRYPTO_AUTH_KASUMI_F9:
2351         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2352         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2353                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2354                               session->auth_alg);
2355                 return -1;
2356         default:
2357                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2358                               session->auth_alg);
2359                 return -1;
2360         }
2361         cipherdata->key = (size_t)session->cipher_key.data;
2362         cipherdata->keylen = session->cipher_key.length;
2363         cipherdata->key_enc_flags = 0;
2364         cipherdata->key_type = RTA_DATA_IMM;
2365
2366         switch (session->cipher_alg) {
2367         case RTE_CRYPTO_CIPHER_AES_CBC:
2368                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2369                 cipherdata->algmode = OP_ALG_AAI_CBC;
2370                 break;
2371         case RTE_CRYPTO_CIPHER_3DES_CBC:
2372                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2373                 cipherdata->algmode = OP_ALG_AAI_CBC;
2374                 break;
2375         case RTE_CRYPTO_CIPHER_AES_CTR:
2376                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2377                 cipherdata->algmode = OP_ALG_AAI_CTR;
2378                 break;
2379         case RTE_CRYPTO_CIPHER_NULL:
2380                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2381                 break;
2382         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2383         case RTE_CRYPTO_CIPHER_3DES_ECB:
2384         case RTE_CRYPTO_CIPHER_AES_ECB:
2385         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2386                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2387                               session->cipher_alg);
2388                 return -1;
2389         default:
2390                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2391                               session->cipher_alg);
2392                 return -1;
2393         }
2394
2395         return 0;
2396 }
2397
#ifdef RTE_LIBRTE_SECURITY_TEST
/* Fixed 16-byte AES-CBC IV (0x00..0x0f) used only by the built-in
 * security self-test; never referenced in production builds.
 */
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
2403
2404 static int
2405 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2406                             struct rte_security_session_conf *conf,
2407                             void *sess)
2408 {
2409         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2410         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2411         struct rte_crypto_auth_xform *auth_xform = NULL;
2412         struct rte_crypto_aead_xform *aead_xform = NULL;
2413         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2414         struct ctxt_priv *priv;
2415         struct ipsec_encap_pdb encap_pdb;
2416         struct ipsec_decap_pdb decap_pdb;
2417         struct alginfo authdata, cipherdata;
2418         int bufsize;
2419         struct sec_flow_context *flc;
2420         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2421         int ret = -1;
2422
2423         PMD_INIT_FUNC_TRACE();
2424
2425         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2426                                 sizeof(struct ctxt_priv) +
2427                                 sizeof(struct sec_flc_desc),
2428                                 RTE_CACHE_LINE_SIZE);
2429
2430         if (priv == NULL) {
2431                 DPAA2_SEC_ERR("No memory for priv CTXT");
2432                 return -ENOMEM;
2433         }
2434
2435         priv->fle_pool = dev_priv->fle_pool;
2436         flc = &priv->flc_desc[0].flc;
2437
2438         memset(session, 0, sizeof(dpaa2_sec_session));
2439
2440         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2441                 cipher_xform = &conf->crypto_xform->cipher;
2442                 if (conf->crypto_xform->next)
2443                         auth_xform = &conf->crypto_xform->next->auth;
2444                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2445                                         session, &cipherdata, &authdata);
2446         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2447                 auth_xform = &conf->crypto_xform->auth;
2448                 if (conf->crypto_xform->next)
2449                         cipher_xform = &conf->crypto_xform->next->cipher;
2450                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2451                                         session, &cipherdata, &authdata);
2452         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2453                 aead_xform = &conf->crypto_xform->aead;
2454                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2455                                         session, &cipherdata);
2456         } else {
2457                 DPAA2_SEC_ERR("XFORM not specified");
2458                 ret = -EINVAL;
2459                 goto out;
2460         }
2461         if (ret) {
2462                 DPAA2_SEC_ERR("Failed to process xform");
2463                 goto out;
2464         }
2465
2466         session->ctxt_type = DPAA2_SEC_IPSEC;
2467         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2468                 struct ip ip4_hdr;
2469
2470                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2471                 ip4_hdr.ip_v = IPVERSION;
2472                 ip4_hdr.ip_hl = 5;
2473                 ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2474                 ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2475                 ip4_hdr.ip_id = 0;
2476                 ip4_hdr.ip_off = 0;
2477                 ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2478                 ip4_hdr.ip_p = IPPROTO_ESP;
2479                 ip4_hdr.ip_sum = 0;
2480                 ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2481                 ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2482                 ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2483                         sizeof(struct ip));
2484
2485                 /* For Sec Proto only one descriptor is required. */
2486                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2487                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2488                         PDBOPTS_ESP_OIHI_PDB_INL |
2489                         PDBOPTS_ESP_IVSRC |
2490                         PDBHMO_ESP_ENCAP_DTTL |
2491                         PDBHMO_ESP_SNR;
2492                 encap_pdb.spi = ipsec_xform->spi;
2493                 encap_pdb.ip_hdr_len = sizeof(struct ip);
2494
2495                 session->dir = DIR_ENC;
2496                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2497                                 1, 0, SHR_SERIAL, &encap_pdb,
2498                                 (uint8_t *)&ip4_hdr,
2499                                 &cipherdata, &authdata);
2500         } else if (ipsec_xform->direction ==
2501                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2502                 flc->dhr = SEC_FLC_DHR_INBOUND;
2503                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2504                 decap_pdb.options = sizeof(struct ip) << 16;
2505                 session->dir = DIR_DEC;
2506                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2507                                 1, 0, SHR_SERIAL,
2508                                 &decap_pdb, &cipherdata, &authdata);
2509         } else
2510                 goto out;
2511
2512         if (bufsize < 0) {
2513                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2514                 goto out;
2515         }
2516
2517         flc->word1_sdl = (uint8_t)bufsize;
2518
2519         /* Enable the stashing control bit */
2520         DPAA2_SET_FLC_RSC(flc);
2521         flc->word2_rflc_31_0 = lower_32_bits(
2522                         (size_t)&(((struct dpaa2_sec_qp *)
2523                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2524         flc->word3_rflc_63_32 = upper_32_bits(
2525                         (size_t)&(((struct dpaa2_sec_qp *)
2526                         dev->data->queue_pairs[0])->rx_vq));
2527
2528         /* Set EWS bit i.e. enable write-safe */
2529         DPAA2_SET_FLC_EWS(flc);
2530         /* Set BS = 1 i.e reuse input buffers as output buffers */
2531         DPAA2_SET_FLC_REUSE_BS(flc);
2532         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2533         DPAA2_SET_FLC_REUSE_FF(flc);
2534
2535         session->ctxt = priv;
2536
2537         return 0;
2538 out:
2539         rte_free(session->auth_key.data);
2540         rte_free(session->cipher_key.data);
2541         rte_free(priv);
2542         return ret;
2543 }
2544
2545 static int
2546 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2547                            struct rte_security_session_conf *conf,
2548                            void *sess)
2549 {
2550         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2551         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2552         struct rte_crypto_auth_xform *auth_xform = NULL;
2553         struct rte_crypto_cipher_xform *cipher_xform;
2554         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2555         struct ctxt_priv *priv;
2556         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2557         struct alginfo authdata, cipherdata;
2558         int bufsize = -1;
2559         struct sec_flow_context *flc;
2560 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2561         int swap = true;
2562 #else
2563         int swap = false;
2564 #endif
2565
2566         PMD_INIT_FUNC_TRACE();
2567
2568         memset(session, 0, sizeof(dpaa2_sec_session));
2569
2570         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2571                                 sizeof(struct ctxt_priv) +
2572                                 sizeof(struct sec_flc_desc),
2573                                 RTE_CACHE_LINE_SIZE);
2574
2575         if (priv == NULL) {
2576                 DPAA2_SEC_ERR("No memory for priv CTXT");
2577                 return -ENOMEM;
2578         }
2579
2580         priv->fle_pool = dev_priv->fle_pool;
2581         flc = &priv->flc_desc[0].flc;
2582
2583         /* find xfrm types */
2584         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2585                 cipher_xform = &xform->cipher;
2586         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2587                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2588                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2589                 cipher_xform = &xform->cipher;
2590                 auth_xform = &xform->next->auth;
2591         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2592                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2593                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2594                 cipher_xform = &xform->next->cipher;
2595                 auth_xform = &xform->auth;
2596         } else {
2597                 DPAA2_SEC_ERR("Invalid crypto type");
2598                 return -EINVAL;
2599         }
2600
2601         session->ctxt_type = DPAA2_SEC_PDCP;
2602         if (cipher_xform) {
2603                 session->cipher_key.data = rte_zmalloc(NULL,
2604                                                cipher_xform->key.length,
2605                                                RTE_CACHE_LINE_SIZE);
2606                 if (session->cipher_key.data == NULL &&
2607                                 cipher_xform->key.length > 0) {
2608                         DPAA2_SEC_ERR("No Memory for cipher key");
2609                         rte_free(priv);
2610                         return -ENOMEM;
2611                 }
2612                 session->cipher_key.length = cipher_xform->key.length;
2613                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2614                         cipher_xform->key.length);
2615                 session->dir =
2616                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2617                                         DIR_ENC : DIR_DEC;
2618                 session->cipher_alg = cipher_xform->algo;
2619         } else {
2620                 session->cipher_key.data = NULL;
2621                 session->cipher_key.length = 0;
2622                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2623                 session->dir = DIR_ENC;
2624         }
2625
2626         session->pdcp.domain = pdcp_xform->domain;
2627         session->pdcp.bearer = pdcp_xform->bearer;
2628         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2629         session->pdcp.sn_size = pdcp_xform->sn_size;
2630 #ifdef ENABLE_HFN_OVERRIDE
2631         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2632 #endif
2633         session->pdcp.hfn = pdcp_xform->hfn;
2634         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2635
2636         cipherdata.key = (size_t)session->cipher_key.data;
2637         cipherdata.keylen = session->cipher_key.length;
2638         cipherdata.key_enc_flags = 0;
2639         cipherdata.key_type = RTA_DATA_IMM;
2640
2641         switch (session->cipher_alg) {
2642         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2643                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2644                 break;
2645         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2646                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2647                 break;
2648         case RTE_CRYPTO_CIPHER_AES_CTR:
2649                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2650                 break;
2651         case RTE_CRYPTO_CIPHER_NULL:
2652                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2653                 break;
2654         default:
2655                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2656                               session->cipher_alg);
2657                 goto out;
2658         }
2659
2660         /* Auth is only applicable for control mode operation. */
2661         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2662                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
2663                         DPAA2_SEC_ERR(
2664                                 "PDCP Seq Num size should be 5 bits for cmode");
2665                         goto out;
2666                 }
2667                 if (auth_xform) {
2668                         session->auth_key.data = rte_zmalloc(NULL,
2669                                                         auth_xform->key.length,
2670                                                         RTE_CACHE_LINE_SIZE);
2671                         if (session->auth_key.data == NULL &&
2672                                         auth_xform->key.length > 0) {
2673                                 DPAA2_SEC_ERR("No Memory for auth key");
2674                                 rte_free(session->cipher_key.data);
2675                                 rte_free(priv);
2676                                 return -ENOMEM;
2677                         }
2678                         session->auth_key.length = auth_xform->key.length;
2679                         memcpy(session->auth_key.data, auth_xform->key.data,
2680                                         auth_xform->key.length);
2681                         session->auth_alg = auth_xform->algo;
2682                 } else {
2683                         session->auth_key.data = NULL;
2684                         session->auth_key.length = 0;
2685                         session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2686                 }
2687                 authdata.key = (size_t)session->auth_key.data;
2688                 authdata.keylen = session->auth_key.length;
2689                 authdata.key_enc_flags = 0;
2690                 authdata.key_type = RTA_DATA_IMM;
2691
2692                 switch (session->auth_alg) {
2693                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2694                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2695                         break;
2696                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2697                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
2698                         break;
2699                 case RTE_CRYPTO_AUTH_AES_CMAC:
2700                         authdata.algtype = PDCP_AUTH_TYPE_AES;
2701                         break;
2702                 case RTE_CRYPTO_AUTH_NULL:
2703                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
2704                         break;
2705                 default:
2706                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2707                                       session->auth_alg);
2708                         goto out;
2709                 }
2710
2711                 if (session->dir == DIR_ENC)
2712                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2713                                         priv->flc_desc[0].desc, 1, swap,
2714                                         pdcp_xform->hfn,
2715                                         pdcp_xform->bearer,
2716                                         pdcp_xform->pkt_dir,
2717                                         pdcp_xform->hfn_threshold,
2718                                         &cipherdata, &authdata,
2719                                         0);
2720                 else if (session->dir == DIR_DEC)
2721                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2722                                         priv->flc_desc[0].desc, 1, swap,
2723                                         pdcp_xform->hfn,
2724                                         pdcp_xform->bearer,
2725                                         pdcp_xform->pkt_dir,
2726                                         pdcp_xform->hfn_threshold,
2727                                         &cipherdata, &authdata,
2728                                         0);
2729         } else {
2730                 if (session->dir == DIR_ENC)
2731                         bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2732                                         priv->flc_desc[0].desc, 1, swap,
2733                                         (enum pdcp_sn_size)pdcp_xform->sn_size,
2734                                         pdcp_xform->hfn,
2735                                         pdcp_xform->bearer,
2736                                         pdcp_xform->pkt_dir,
2737                                         pdcp_xform->hfn_threshold,
2738                                         &cipherdata, 0);
2739                 else if (session->dir == DIR_DEC)
2740                         bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2741                                         priv->flc_desc[0].desc, 1, swap,
2742                                         (enum pdcp_sn_size)pdcp_xform->sn_size,
2743                                         pdcp_xform->hfn,
2744                                         pdcp_xform->bearer,
2745                                         pdcp_xform->pkt_dir,
2746                                         pdcp_xform->hfn_threshold,
2747                                         &cipherdata, 0);
2748         }
2749
2750         if (bufsize < 0) {
2751                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2752                 goto out;
2753         }
2754
2755         /* Enable the stashing control bit */
2756         DPAA2_SET_FLC_RSC(flc);
2757         flc->word2_rflc_31_0 = lower_32_bits(
2758                         (size_t)&(((struct dpaa2_sec_qp *)
2759                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2760         flc->word3_rflc_63_32 = upper_32_bits(
2761                         (size_t)&(((struct dpaa2_sec_qp *)
2762                         dev->data->queue_pairs[0])->rx_vq));
2763
2764         flc->word1_sdl = (uint8_t)bufsize;
2765
2766         /* Set EWS bit i.e. enable write-safe */
2767         DPAA2_SET_FLC_EWS(flc);
2768         /* Set BS = 1 i.e reuse input buffers as output buffers */
2769         DPAA2_SET_FLC_REUSE_BS(flc);
2770         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2771         DPAA2_SET_FLC_REUSE_FF(flc);
2772
2773         session->ctxt = priv;
2774
2775         return 0;
2776 out:
2777         rte_free(session->auth_key.data);
2778         rte_free(session->cipher_key.data);
2779         rte_free(priv);
2780         return -1;
2781 }
2782
2783 static int
2784 dpaa2_sec_security_session_create(void *dev,
2785                                   struct rte_security_session_conf *conf,
2786                                   struct rte_security_session *sess,
2787                                   struct rte_mempool *mempool)
2788 {
2789         void *sess_private_data;
2790         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2791         int ret;
2792
2793         if (rte_mempool_get(mempool, &sess_private_data)) {
2794                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2795                 return -ENOMEM;
2796         }
2797
2798         switch (conf->protocol) {
2799         case RTE_SECURITY_PROTOCOL_IPSEC:
2800                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2801                                 sess_private_data);
2802                 break;
2803         case RTE_SECURITY_PROTOCOL_MACSEC:
2804                 return -ENOTSUP;
2805         case RTE_SECURITY_PROTOCOL_PDCP:
2806                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
2807                                 sess_private_data);
2808                 break;
2809         default:
2810                 return -EINVAL;
2811         }
2812         if (ret != 0) {
2813                 DPAA2_SEC_ERR("Failed to configure session parameters");
2814                 /* Return session to mempool */
2815                 rte_mempool_put(mempool, sess_private_data);
2816                 return ret;
2817         }
2818
2819         set_sec_session_private_data(sess, sess_private_data);
2820
2821         return ret;
2822 }
2823
2824 /** Clear the memory of session so it doesn't leave key material behind */
2825 static int
2826 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2827                 struct rte_security_session *sess)
2828 {
2829         PMD_INIT_FUNC_TRACE();
2830         void *sess_priv = get_sec_session_private_data(sess);
2831
2832         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2833
2834         if (sess_priv) {
2835                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2836
2837                 rte_free(s->ctxt);
2838                 rte_free(s->cipher_key.data);
2839                 rte_free(s->auth_key.data);
2840                 memset(sess, 0, sizeof(dpaa2_sec_session));
2841                 set_sec_session_private_data(sess, NULL);
2842                 rte_mempool_put(sess_mp, sess_priv);
2843         }
2844         return 0;
2845 }
2846
2847 static int
2848 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2849                 struct rte_crypto_sym_xform *xform,
2850                 struct rte_cryptodev_sym_session *sess,
2851                 struct rte_mempool *mempool)
2852 {
2853         void *sess_private_data;
2854         int ret;
2855
2856         if (rte_mempool_get(mempool, &sess_private_data)) {
2857                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2858                 return -ENOMEM;
2859         }
2860
2861         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2862         if (ret != 0) {
2863                 DPAA2_SEC_ERR("Failed to configure session parameters");
2864                 /* Return session to mempool */
2865                 rte_mempool_put(mempool, sess_private_data);
2866                 return ret;
2867         }
2868
2869         set_sym_session_private_data(sess, dev->driver_id,
2870                 sess_private_data);
2871
2872         return 0;
2873 }
2874
2875 /** Clear the memory of session so it doesn't leave key material behind */
2876 static void
2877 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2878                 struct rte_cryptodev_sym_session *sess)
2879 {
2880         PMD_INIT_FUNC_TRACE();
2881         uint8_t index = dev->driver_id;
2882         void *sess_priv = get_sym_session_private_data(sess, index);
2883         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2884
2885         if (sess_priv) {
2886                 rte_free(s->ctxt);
2887                 rte_free(s->cipher_key.data);
2888                 rte_free(s->auth_key.data);
2889                 memset(s, 0, sizeof(dpaa2_sec_session));
2890                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2891                 set_sym_session_private_data(sess, index, NULL);
2892                 rte_mempool_put(sess_mp, sess_priv);
2893         }
2894 }
2895
/* cryptodev dev_configure callback: no device-level configuration is
 * required for DPSECI; everything is set up per queue pair and at
 * dev_start, so this is a deliberate no-op that reports success.
 */
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
2904
/* cryptodev dev_start callback: enable the DPSECI object via the MC
 * firmware, read back its attributes and cache the frame-queue ids of
 * every configured queue pair so the datapath can enqueue/dequeue
 * without further MC calls. Returns 0 on success, -1 on failure (the
 * device is disabled again on the error path).
 */
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
	/* Loops stop at the first unconfigured queue pair (qp[i] == NULL).
	 * NOTE(review): the return values of dpseci_get_rx_queue() and
	 * dpseci_get_tx_queue() are ignored; on failure a stale fqid could
	 * be cached — confirm whether these MC calls can fail here.
	 */
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
2953
2954 static void
2955 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2956 {
2957         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2958         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2959         int ret;
2960
2961         PMD_INIT_FUNC_TRACE();
2962
2963         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2964         if (ret) {
2965                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
2966                              priv->hw_id);
2967                 return;
2968         }
2969
2970         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2971         if (ret < 0) {
2972                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
2973                 return;
2974         }
2975 }
2976
2977 static int
2978 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2979 {
2980         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2981         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2982         int ret;
2983
2984         PMD_INIT_FUNC_TRACE();
2985
2986         /* Function is reverse of dpaa2_sec_dev_init.
2987          * It does the following:
2988          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2989          * 2. Close the DPSECI device
2990          * 3. Free the allocated resources.
2991          */
2992
2993         /*Close the device at underlying layer*/
2994         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2995         if (ret) {
2996                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
2997                 return -1;
2998         }
2999
3000         /*Free the allocated memory for ethernet private data and dpseci*/
3001         priv->hw = NULL;
3002         rte_free(dpseci);
3003
3004         return 0;
3005 }
3006
3007 static void
3008 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3009                         struct rte_cryptodev_info *info)
3010 {
3011         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3012
3013         PMD_INIT_FUNC_TRACE();
3014         if (info != NULL) {
3015                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3016                 info->feature_flags = dev->feature_flags;
3017                 info->capabilities = dpaa2_sec_capabilities;
3018                 /* No limit of number of sessions */
3019                 info->sym.max_nb_sessions = 0;
3020                 info->driver_id = cryptodev_driver_id;
3021         }
3022 }
3023
3024 static
3025 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3026                          struct rte_cryptodev_stats *stats)
3027 {
3028         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3029         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3030         struct dpseci_sec_counters counters = {0};
3031         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3032                                         dev->data->queue_pairs;
3033         int ret, i;
3034
3035         PMD_INIT_FUNC_TRACE();
3036         if (stats == NULL) {
3037                 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3038                 return;
3039         }
3040         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3041                 if (qp[i] == NULL) {
3042                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3043                         continue;
3044                 }
3045
3046                 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3047                 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3048                 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3049                 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3050         }
3051
3052         ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3053                                       &counters);
3054         if (ret) {
3055                 DPAA2_SEC_ERR("SEC counters failed");
3056         } else {
3057                 DPAA2_SEC_INFO("dpseci hardware stats:"
3058                             "\n\tNum of Requests Dequeued = %" PRIu64
3059                             "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3060                             "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3061                             "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3062                             "\n\tNum of Outbound Bytes Protected = %" PRIu64
3063                             "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3064                             "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3065                             counters.dequeued_requests,
3066                             counters.ob_enc_requests,
3067                             counters.ib_dec_requests,
3068                             counters.ob_enc_bytes,
3069                             counters.ob_prot_bytes,
3070                             counters.ib_dec_bytes,
3071                             counters.ib_valid_bytes);
3072         }
3073 }
3074
3075 static
3076 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3077 {
3078         int i;
3079         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3080                                    (dev->data->queue_pairs);
3081
3082         PMD_INIT_FUNC_TRACE();
3083
3084         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3085                 if (qp[i] == NULL) {
3086                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3087                         continue;
3088                 }
3089                 qp[i]->tx_vq.rx_pkts = 0;
3090                 qp[i]->tx_vq.tx_pkts = 0;
3091                 qp[i]->tx_vq.err_pkts = 0;
3092                 qp[i]->rx_vq.rx_pkts = 0;
3093                 qp[i]->rx_vq.tx_pkts = 0;
3094                 qp[i]->rx_vq.err_pkts = 0;
3095         }
3096 }
3097
3098 static void __attribute__((hot))
3099 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3100                                  const struct qbman_fd *fd,
3101                                  const struct qbman_result *dq,
3102                                  struct dpaa2_queue *rxq,
3103                                  struct rte_event *ev)
3104 {
3105         /* Prefetching mbuf */
3106         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3107                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3108
3109         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3110         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3111
3112         ev->flow_id = rxq->ev.flow_id;
3113         ev->sub_event_type = rxq->ev.sub_event_type;
3114         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3115         ev->op = RTE_EVENT_OP_NEW;
3116         ev->sched_type = rxq->ev.sched_type;
3117         ev->queue_id = rxq->ev.queue_id;
3118         ev->priority = rxq->ev.priority;
3119         ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
3120                                 (rxq->dev))->driver_id);
3121
3122         qbman_swp_dqrr_consume(swp, dq);
3123 }
3124 static void
3125 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3126                                  const struct qbman_fd *fd,
3127                                  const struct qbman_result *dq,
3128                                  struct dpaa2_queue *rxq,
3129                                  struct rte_event *ev)
3130 {
3131         uint8_t dqrr_index;
3132         struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3133         /* Prefetching mbuf */
3134         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3135                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3136
3137         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3138         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3139
3140         ev->flow_id = rxq->ev.flow_id;
3141         ev->sub_event_type = rxq->ev.sub_event_type;
3142         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3143         ev->op = RTE_EVENT_OP_NEW;
3144         ev->sched_type = rxq->ev.sched_type;
3145         ev->queue_id = rxq->ev.queue_id;
3146         ev->priority = rxq->ev.priority;
3147
3148         ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
3149                                 (rxq->dev))->driver_id);
3150         dqrr_index = qbman_get_dqrr_idx(dq);
3151         crypto_op->sym->m_src->seqn = dqrr_index + 1;
3152         DPAA2_PER_LCORE_DQRR_SIZE++;
3153         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3154         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3155 }
3156
3157 int
3158 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3159                 int qp_id,
3160                 uint16_t dpcon_id,
3161                 const struct rte_event *event)
3162 {
3163         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3164         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3165         struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3166         struct dpseci_rx_queue_cfg cfg;
3167         int ret;
3168
3169         if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3170                 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3171         else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3172                 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3173         else
3174                 return -EINVAL;
3175
3176         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3177         cfg.options = DPSECI_QUEUE_OPT_DEST;
3178         cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3179         cfg.dest_cfg.dest_id = dpcon_id;
3180         cfg.dest_cfg.priority = event->priority;
3181
3182         cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3183         cfg.user_ctx = (size_t)(qp);
3184         if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3185                 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3186                 cfg.order_preservation_en = 1;
3187         }
3188         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3189                                   qp_id, &cfg);
3190         if (ret) {
3191                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3192                 return ret;
3193         }
3194
3195         memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3196
3197         return 0;
3198 }
3199
3200 int
3201 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3202                         int qp_id)
3203 {
3204         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3205         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3206         struct dpseci_rx_queue_cfg cfg;
3207         int ret;
3208
3209         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3210         cfg.options = DPSECI_QUEUE_OPT_DEST;
3211         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3212
3213         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3214                                   qp_id, &cfg);
3215         if (ret)
3216                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3217
3218         return ret;
3219 }
3220
/* Cryptodev operations table exported to the rte_cryptodev framework. */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
3236
/* rte_security callback: return the PMD's static security capability table. */
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}
3242
/* rte_security operations; hooks this PMD does not implement stay NULL. */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
3251
3252 static int
3253 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3254 {
3255         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3256
3257         rte_free(dev->security_ctx);
3258
3259         rte_mempool_free(internals->fle_pool);
3260
3261         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3262                        dev->data->name, rte_socket_id());
3263
3264         return 0;
3265 }
3266
3267 static int
3268 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3269 {
3270         struct dpaa2_sec_dev_private *internals;
3271         struct rte_device *dev = cryptodev->device;
3272         struct rte_dpaa2_device *dpaa2_dev;
3273         struct rte_security_ctx *security_instance;
3274         struct fsl_mc_io *dpseci;
3275         uint16_t token;
3276         struct dpseci_attr attr;
3277         int retcode, hw_id;
3278         char str[20];
3279
3280         PMD_INIT_FUNC_TRACE();
3281         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3282         if (dpaa2_dev == NULL) {
3283                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3284                 return -1;
3285         }
3286         hw_id = dpaa2_dev->object_id;
3287
3288         cryptodev->driver_id = cryptodev_driver_id;
3289         cryptodev->dev_ops = &crypto_ops;
3290
3291         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3292         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3293         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3294                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3295                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3296                         RTE_CRYPTODEV_FF_SECURITY |
3297                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3298                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3299                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3300                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3301                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3302
3303         internals = cryptodev->data->dev_private;
3304
3305         /*
3306          * For secondary processes, we don't initialise any further as primary
3307          * has already done this work. Only check we don't need a different
3308          * RX function
3309          */
3310         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3311                 DPAA2_SEC_DEBUG("Device already init by primary process");
3312                 return 0;
3313         }
3314
3315         /* Initialize security_ctx only for primary process*/
3316         security_instance = rte_malloc("rte_security_instances_ops",
3317                                 sizeof(struct rte_security_ctx), 0);
3318         if (security_instance == NULL)
3319                 return -ENOMEM;
3320         security_instance->device = (void *)cryptodev;
3321         security_instance->ops = &dpaa2_sec_security_ops;
3322         security_instance->sess_cnt = 0;
3323         cryptodev->security_ctx = security_instance;
3324
3325         /*Open the rte device via MC and save the handle for further use*/
3326         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3327                                 sizeof(struct fsl_mc_io), 0);
3328         if (!dpseci) {
3329                 DPAA2_SEC_ERR(
3330                         "Error in allocating the memory for dpsec object");
3331                 return -1;
3332         }
3333         dpseci->regs = rte_mcp_ptr_list[0];
3334
3335         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3336         if (retcode != 0) {
3337                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3338                               retcode);
3339                 goto init_error;
3340         }
3341         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3342         if (retcode != 0) {
3343                 DPAA2_SEC_ERR(
3344                              "Cannot get dpsec device attributed: Error = %x",
3345                              retcode);
3346                 goto init_error;
3347         }
3348         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3349                         "dpsec-%u", hw_id);
3350
3351         internals->max_nb_queue_pairs = attr.num_tx_queues;
3352         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3353         internals->hw = dpseci;
3354         internals->token = token;
3355
3356         snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
3357         internals->fle_pool = rte_mempool_create((const char *)str,
3358                         FLE_POOL_NUM_BUFS,
3359                         FLE_POOL_BUF_SIZE,
3360                         FLE_POOL_CACHE_SIZE, 0,
3361                         NULL, NULL, NULL, NULL,
3362                         SOCKET_ID_ANY, 0);
3363         if (!internals->fle_pool) {
3364                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3365                 goto init_error;
3366         }
3367
3368         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3369         return 0;
3370
3371 init_error:
3372         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3373
3374         /* dpaa2_sec_uninit(crypto_dev_name); */
3375         return -EFAULT;
3376 }
3377
3378 static int
3379 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3380                           struct rte_dpaa2_device *dpaa2_dev)
3381 {
3382         struct rte_cryptodev *cryptodev;
3383         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3384
3385         int retval;
3386
3387         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3388                         dpaa2_dev->object_id);
3389
3390         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3391         if (cryptodev == NULL)
3392                 return -ENOMEM;
3393
3394         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3395                 cryptodev->data->dev_private = rte_zmalloc_socket(
3396                                         "cryptodev private structure",
3397                                         sizeof(struct dpaa2_sec_dev_private),
3398                                         RTE_CACHE_LINE_SIZE,
3399                                         rte_socket_id());
3400
3401                 if (cryptodev->data->dev_private == NULL)
3402                         rte_panic("Cannot allocate memzone for private "
3403                                   "device data");
3404         }
3405
3406         dpaa2_dev->cryptodev = cryptodev;
3407         cryptodev->device = &dpaa2_dev->device;
3408
3409         /* init user callbacks */
3410         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3411
3412         /* Invoke PMD device initialization function */
3413         retval = dpaa2_sec_dev_init(cryptodev);
3414         if (retval == 0)
3415                 return 0;
3416
3417         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3418                 rte_free(cryptodev->data->dev_private);
3419
3420         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3421
3422         return -ENXIO;
3423 }
3424
3425 static int
3426 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3427 {
3428         struct rte_cryptodev *cryptodev;
3429         int ret;
3430
3431         cryptodev = dpaa2_dev->cryptodev;
3432         if (cryptodev == NULL)
3433                 return -ENODEV;
3434
3435         ret = dpaa2_sec_uninit(cryptodev);
3436         if (ret)
3437                 return ret;
3438
3439         return rte_cryptodev_pmd_destroy(cryptodev);
3440 }
3441
/* fslmc bus driver descriptor for the DPAA2 SEC (crypto) PMD. */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

/* Handle used to register this PMD with the cryptodev framework. */
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the fslmc bus and the cryptodev framework. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3457
/* Constructor: register this PMD's log type; default level is NOTICE. */
RTE_INIT(dpaa2_sec_init_log)
{
	/* Bus level logs */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}