crypto/dpaax_sec: support HFN override
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35
36 /* Required types */
37 typedef uint64_t        dma_addr_t;
38
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
43
44 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
45  * a pointer to the shared descriptor
46  */
47 #define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52
53 #define NO_PREFETCH 0
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS       32000
56 #define FLE_POOL_BUF_SIZE       256
57 #define FLE_POOL_CACHE_SIZE     512
58 #define FLE_SG_MEM_SIZE         2048
59 #define SEC_FLC_DHR_OUTBOUND    -114
60 #define SEC_FLC_DHR_INBOUND     0
61
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
63
64 static uint8_t cryptodev_driver_id;
65
66 int dpaa2_logtype_sec;
67
68 static inline int
69 build_proto_compound_fd(dpaa2_sec_session *sess,
70                struct rte_crypto_op *op,
71                struct qbman_fd *fd, uint16_t bpid)
72 {
73         struct rte_crypto_sym_op *sym_op = op->sym;
74         struct ctxt_priv *priv = sess->ctxt;
75         struct qbman_fle *fle, *ip_fle, *op_fle;
76         struct sec_flow_context *flc;
77         struct rte_mbuf *src_mbuf = sym_op->m_src;
78         struct rte_mbuf *dst_mbuf = sym_op->m_dst;
79         int retval;
80
81         if (!dst_mbuf)
82                 dst_mbuf = src_mbuf;
83
84         /* Save the shared descriptor */
85         flc = &priv->flc_desc[0].flc;
86
87         /* we are using the first FLE entry to store Mbuf */
88         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
89         if (retval) {
90                 DPAA2_SEC_ERR("Memory alloc failed");
91                 return -1;
92         }
93         memset(fle, 0, FLE_POOL_BUF_SIZE);
94         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96
97         op_fle = fle + 1;
98         ip_fle = fle + 2;
99
100         if (likely(bpid < MAX_BPID)) {
101                 DPAA2_SET_FD_BPID(fd, bpid);
102                 DPAA2_SET_FLE_BPID(op_fle, bpid);
103                 DPAA2_SET_FLE_BPID(ip_fle, bpid);
104         } else {
105                 DPAA2_SET_FD_IVP(fd);
106                 DPAA2_SET_FLE_IVP(op_fle);
107                 DPAA2_SET_FLE_IVP(ip_fle);
108         }
109
110         /* Configure FD as a FRAME LIST */
111         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
112         DPAA2_SET_FD_COMPOUND_FMT(fd);
113         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
114
115         /* Configure Output FLE with dst mbuf data  */
116         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
117         DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
118         DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
119
120         /* Configure Input FLE with src mbuf data */
121         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
122         DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
123         DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
124
125         DPAA2_SET_FD_LEN(fd, ip_fle->length);
126         DPAA2_SET_FLE_FIN(ip_fle);
127
128         /* In case of PDCP, per packet HFN is stored in
129          * mbuf priv after sym_op.
130          */
131         if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
132                 uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
133                 /*enable HFN override override */
134                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
135                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
136                 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
137         }
138
139         return 0;
140
141 }
142
143 static inline int
144 build_proto_fd(dpaa2_sec_session *sess,
145                struct rte_crypto_op *op,
146                struct qbman_fd *fd, uint16_t bpid)
147 {
148         struct rte_crypto_sym_op *sym_op = op->sym;
149         if (sym_op->m_dst)
150                 return build_proto_compound_fd(sess, op, fd, bpid);
151
152         struct ctxt_priv *priv = sess->ctxt;
153         struct sec_flow_context *flc;
154         struct rte_mbuf *mbuf = sym_op->m_src;
155
156         if (likely(bpid < MAX_BPID))
157                 DPAA2_SET_FD_BPID(fd, bpid);
158         else
159                 DPAA2_SET_FD_IVP(fd);
160
161         /* Save the shared descriptor */
162         flc = &priv->flc_desc[0].flc;
163
164         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
165         DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
166         DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
167         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
168
169         /* save physical address of mbuf */
170         op->sym->aead.digest.phys_addr = mbuf->buf_iova;
171         mbuf->buf_iova = (size_t)op;
172
173         return 0;
174 }
175
/**
 * Build a compound FD for an AEAD (GCM) operation over scatter/gather
 * (multi-segment) mbufs.
 *
 * FLE memory is rte_malloc'ed (FLE_SG_MEM_SIZE) rather than pool-drawn,
 * since the SGE count depends on the segment count. Layout: fle[0] stores
 * the op pointer + session ctxt, fle[1] is the output FLE, fle[2] the
 * input FLE, and the SGE table starts at fle[3].
 *
 * Output frame:  [payload (+AAD alignment skew)] [ICV if encrypting]
 * Input frame:   [IV] [AAD if any] [payload segs] [old ICV if decrypting]
 *
 * NOTE(review): the FLE memory allocated here is presumably freed on the
 * dequeue path via the op pointer stored in fle[0] — confirm against the
 * dequeue handler, which is outside this view.
 *
 * @return 0 on success, -1 on allocation failure
 */
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output lands in m_dst when set, otherwise in-place in m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Encrypt output additionally carries the generated ICV */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	/* Offset is skewed so the AAD region stays 16-byte aligned;
	 * presumably matches what the CAAM descriptor expects — see the
	 * shared-descriptor construction (not in this view).
	 */
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the ICV slot off the last output segment; the ICV is
	 * addressed separately below (encrypt) or not produced (decrypt).
	 */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Decrypt input additionally carries the received ICV to verify */
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap: IV comes first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		/* AAD precedes the ciphertext/plaintext in the input frame */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		/* Copy the received ICV into scratch space right after the
		 * SGE table so SEC can compare against it.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
322
/**
 * Build a compound FD for an AEAD (GCM) operation over single-segment
 * mbufs.
 *
 * FLEs come from the session's fixed-size FLE pool: the first entry
 * stores the op pointer + session ctxt (recovered at dequeue by stepping
 * back one FLE from the FD address), followed by output FLE, input FLE,
 * and a small fixed SGE table.
 *
 * @return 0 on success, -1 on FLE pool exhaustion
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output lands in m_dst when set, otherwise in-place in m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* fle now points at the output FLE; sge at the SGE table */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output additionally carries the generated ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap.
	 * Offset is skewed so the AAD region stays 16-byte aligned.
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
			RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		/* Generated ICV is written to the caller-supplied digest */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally carries the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first, then AAD,
	 * then payload, then (decrypt only) the ICV scratch copy.
	 */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* Stash received ICV right after the SGE table so SEC can
		 * compare against it.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
470
/**
 * Build a compound FD for a chained cipher+auth operation over
 * scatter/gather (multi-segment) mbufs.
 *
 * auth_only_len is the authenticated-but-not-encrypted prefix
 * (auth length minus cipher length); it is programmed into the
 * FLE/FD internal-JD fields so SEC knows where ciphering starts.
 *
 * Layout mirrors build_authenc_gcm_sg_fd: fle[0] stores the op pointer +
 * ctxt, fle[1] output FLE, fle[2] input FLE, SGEs from fle[3].
 *
 * @return 0 on success, -1 on allocation failure
 */
static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	/* Prefix that is authenticated but not encrypted */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Output lands in m_dst when set, otherwise in-place in m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	/* Encrypt output additionally carries the generated ICV */
	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	/* Trim the ICV slot off the last output segment; the ICV is
	 * addressed separately below for encrypt.
	 */
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	/* Decrypt input additionally carries the received ICV to verify */
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap: IV comes first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		/* Stash received ICV right after the SGE table so SEC can
		 * compare against it.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
615
/**
 * Build a compound FD for a chained cipher+auth operation over
 * single-segment mbufs.
 *
 * FLEs come from the session's fixed-size FLE pool; fle[0] stores the op
 * pointer + session ctxt for recovery at dequeue (see TODO below),
 * followed by output FLE, input FLE, and a small fixed SGE table.
 *
 * @return 0 on success, -1 on FLE pool exhaustion
 */
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	/* Prefix that is authenticated but not encrypted */
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Output lands in m_dst when set, otherwise in-place in m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	/* fle now points at the output FLE; sge at the SGE table */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output additionally carries the generated ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally carries the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first, then payload */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* Stash received ICV right after the SGE table so SEC can
		 * compare against it.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
757
/**
 * Build a compound FD for an auth-only operation over scatter/gather
 * (multi-segment) mbufs.
 *
 * Output FLE points at the digest buffer; input FLE is an SGE list over
 * the source segments. For verification (DIR_DEC) the received digest is
 * copied to scratch space after the SGE table and appended to the input
 * so SEC can compare.
 *
 * @return 0 on success, -1 on allocation failure
 */
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Auth-only uses the INITFINAL descriptor slot, not slot 0 */
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle: the computed digest */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case: exclude the in-buffer digest
		 * slot from the last input segment.
		 */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case: append a scratch copy of the
		 * received digest for SEC to compare against.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
837
/* Build a compound FD for an authentication-only op on a contiguous
 * (single-segment) mbuf.
 *
 * FLE memory comes from the per-device fle_pool (returned to the pool
 * in sec_fd_to_mbuf() for the contiguous case). fle[0] stores the
 * crypto op + session ctxt; fle[1] is the output (digest) FLE that the
 * FD points at; fle[2] is the input FLE. For verification the input
 * becomes an SG of {payload, expected-digest-copy}.
 *
 * @return 0 on success, -1 if the fle_pool is exhausted
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	/* Tag FD/FLEs with the buffer-pool id, or mark them as
	 * invalid-pool (IVP) when the mbuf pool is not bman-backed.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Output FLE: SEC writes the computed digest here */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Digest generation: input is just the auth region */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification: input FLE becomes an SG list of
		 * {payload, copy of the expected digest}; the copy is
		 * staged in the same fle_pool buffer, past the SG table.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
927
/* Build a compound FD for a cipher-only op on segmented mbuf(s).
 *
 * FLE memory layout (rte_malloc'd; freed in sec_fd_to_mbuf() for the
 * non-contiguous case):
 *   fle[0]   - bookkeeping: crypto op + session ctxt
 *   fle[1]   - output FLE (SG ext) -> output segments (m_dst, or
 *              m_src for in-place operation)
 *   fle[2]   - input FLE (SG ext)  -> {IV, input segments of m_src}
 *   fle[3..] - the SG tables for both of the above
 *
 * @param bpid unused: SG memory comes from rte_malloc, not a bpool
 *             (it is still read in the debug trace below)
 * @return 0 on success, -1 on FLE memory allocation failure
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is provided, else in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg: ciphered region starts at data.offset */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: input SG table starts right after the output's */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV: first input entry feeds the IV from the op's
	 * private area to SEC */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1046
/* Build a compound FD for a cipher-only op on a contiguous mbuf.
 *
 * FLE memory comes from the per-device fle_pool (returned in
 * sec_fd_to_mbuf() for the contiguous case). fle[0] stores the crypto
 * op + session ctxt; fle[1] is the output FLE (destination buffer);
 * fle[2] is the input FLE, an SG of {IV, source payload}.
 *
 * @return 0 on success, -1 if the fle_pool is exhausted
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is provided, else in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag FD/FLEs with the buffer-pool id, or mark invalid-pool
	 * (IVP) when the mbuf pool is not bman-backed.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: ciphered data lands in dst at data.offset */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: SG of {IV, source payload} */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1154
1155 static inline int
1156 build_sec_fd(struct rte_crypto_op *op,
1157              struct qbman_fd *fd, uint16_t bpid)
1158 {
1159         int ret = -1;
1160         dpaa2_sec_session *sess;
1161
1162         PMD_INIT_FUNC_TRACE();
1163
1164         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1165                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1166                                 op->sym->session, cryptodev_driver_id);
1167         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1168                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1169                                 op->sym->sec_session);
1170         else
1171                 return -1;
1172
1173         /* Segmented buffer */
1174         if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1175                 switch (sess->ctxt_type) {
1176                 case DPAA2_SEC_CIPHER:
1177                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1178                         break;
1179                 case DPAA2_SEC_AUTH:
1180                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1181                         break;
1182                 case DPAA2_SEC_AEAD:
1183                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1184                         break;
1185                 case DPAA2_SEC_CIPHER_HASH:
1186                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1187                         break;
1188                 case DPAA2_SEC_HASH_CIPHER:
1189                 default:
1190                         DPAA2_SEC_ERR("error: Unsupported session");
1191                 }
1192         } else {
1193                 switch (sess->ctxt_type) {
1194                 case DPAA2_SEC_CIPHER:
1195                         ret = build_cipher_fd(sess, op, fd, bpid);
1196                         break;
1197                 case DPAA2_SEC_AUTH:
1198                         ret = build_auth_fd(sess, op, fd, bpid);
1199                         break;
1200                 case DPAA2_SEC_AEAD:
1201                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1202                         break;
1203                 case DPAA2_SEC_CIPHER_HASH:
1204                         ret = build_authenc_fd(sess, op, fd, bpid);
1205                         break;
1206                 case DPAA2_SEC_IPSEC:
1207                         ret = build_proto_fd(sess, op, fd, bpid);
1208                         break;
1209                 case DPAA2_SEC_PDCP:
1210                         ret = build_proto_compound_fd(sess, op, fd, bpid);
1211                         break;
1212                 case DPAA2_SEC_HASH_CIPHER:
1213                 default:
1214                         DPAA2_SEC_ERR("error: Unsupported session");
1215                 }
1216         }
1217         return ret;
1218 }
1219
/* Enqueue a burst of crypto ops onto this queue pair's TX frame queue.
 *
 * Builds one FD per op (up to the EQCR ring size per batch) and
 * submits the batch to QBMAN. Ops arriving via an ordered/atomic
 * event queue carry a DQRR index in m_src->seqn; for those the
 * enqueue consumes the held DQRR entry (DCA).
 *
 * @return number of ops actually enqueued (<= nb_ops). Note: on a
 *         build_sec_fd() failure the whole in-progress batch is
 *         abandoned (skip_tx), including FDs already built for it.
 */
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/*todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Only checks ops[0]; a burst is assumed to share one sess type */
	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	/* Lazily affine a QBMAN portal to this lcore */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Op came through an event queue: discharge the
			 * held DQRR entry via DCA on this enqueue.
			 */
			if ((*ops)->sym->m_src->seqn) {
			 uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

			 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
			 DPAA2_PER_LCORE_DQRR_SIZE--;
			 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			 (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				/* NOTE(review): aborts the whole batch;
				 * FDs built earlier in this batch are
				 * dropped, not enqueued.
				 */
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		/* Retry until QBMAN accepts the whole batch */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							&flags[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
1302
/* Recover the crypto op from a single-buffer (non-compound) FD, used
 * for protocol-offload (security session) responses.
 *
 * The enqueue side stashed the op pointer in the mbuf's buf_iova and
 * parked the real buf_iova in aead.digest.phys_addr; undo both here.
 * The mbuf length is grown to cover the FD length (protocol offload
 * can expand the packet, e.g. by adding headers/trailers), and
 * data_off is adjusted by the SEC data-head-room delta for the
 * session direction.
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* NOTE(review): assumes len >= pkt_len; diff would wrap
	 * (uint16_t) otherwise — confirm against the offload path.
	 */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Undo the enqueue-side pointer stashing (see build_proto_fd) */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
1331
/* Recover the crypto op from a completed FD and release the FLE
 * bookkeeping memory.
 *
 * Single-format FDs are delegated to sec_simple_fd_to_mbuf().
 * For compound FDs, the op pointer and session ctxt were stored by
 * the build_* functions in the FLE entry just before the one the FD
 * points at (fle - 1). For IPSEC security sessions the mbuf length is
 * refreshed from the FD (protocol offload changed the packet size).
 *
 * @return the originating crypto op, or NULL for a non-inline (IVP)
 *         buffer, which is not supported yet
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* IPSEC offload: packet size changed in hardware; take the
	 * new length from the FD.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: contiguous builders take it from the
	 * fle_pool mempool, SG builders from rte_malloc — mirror that.
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1401
/* Dequeue completed crypto ops from this queue pair's RX frame queue.
 *
 * Issues one volatile dequeue (pull) command for up to nb_ops frames
 * (capped at the DQRR ring size), then spins until QBMAN delivers the
 * results and converts each FD back to its crypto op. The op status
 * is derived from the FD's frame-context (frc) error field.
 *
 * @return number of ops written into ops[]
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	/* Lazily affine a QBMAN portal to this lcore */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		/* NOTE(review): sec_fd_to_mbuf() can return NULL for a
		 * non-inline (IVP) buffer; the status write below would
		 * then dereference NULL — confirm that path is unreachable.
		 */
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1498
1499 /** Release queue pair */
1500 static int
1501 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1502 {
1503         struct dpaa2_sec_qp *qp =
1504                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1505
1506         PMD_INIT_FUNC_TRACE();
1507
1508         if (qp->rx_vq.q_storage) {
1509                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1510                 rte_free(qp->rx_vq.q_storage);
1511         }
1512         rte_free(qp);
1513
1514         dev->data->queue_pairs[queue_pair_id] = NULL;
1515
1516         return 0;
1517 }
1518
1519 /** Setup a queue pair */
1520 static int
1521 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1522                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1523                 __rte_unused int socket_id)
1524 {
1525         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1526         struct dpaa2_sec_qp *qp;
1527         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1528         struct dpseci_rx_queue_cfg cfg;
1529         int32_t retcode;
1530
1531         PMD_INIT_FUNC_TRACE();
1532
1533         /* If qp is already in use free ring memory and qp metadata. */
1534         if (dev->data->queue_pairs[qp_id] != NULL) {
1535                 DPAA2_SEC_INFO("QP already setup");
1536                 return 0;
1537         }
1538
1539         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1540                     dev, qp_id, qp_conf);
1541
1542         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1543
1544         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1545                         RTE_CACHE_LINE_SIZE);
1546         if (!qp) {
1547                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1548                 return -1;
1549         }
1550
1551         qp->rx_vq.crypto_data = dev->data;
1552         qp->tx_vq.crypto_data = dev->data;
1553         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1554                 sizeof(struct queue_storage_info_t),
1555                 RTE_CACHE_LINE_SIZE);
1556         if (!qp->rx_vq.q_storage) {
1557                 DPAA2_SEC_ERR("malloc failed for q_storage");
1558                 return -1;
1559         }
1560         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1561
1562         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1563                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1564                 return -1;
1565         }
1566
1567         dev->data->queue_pairs[qp_id] = qp;
1568
1569         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1570         cfg.user_ctx = (size_t)(&qp->rx_vq);
1571         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1572                                       qp_id, &cfg);
1573         return retcode;
1574 }
1575
1576 /** Return the number of allocated queue pairs */
1577 static uint32_t
1578 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1579 {
1580         PMD_INIT_FUNC_TRACE();
1581
1582         return dev->data->nb_queue_pairs;
1583 }
1584
/** Returns the size of the dpaa2_sec session structure
 * (header previously mis-referred to the aesni gcm session).
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
1593
1594 static int
1595 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1596                       struct rte_crypto_sym_xform *xform,
1597                       dpaa2_sec_session *session)
1598 {
1599         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1600         struct alginfo cipherdata;
1601         int bufsize, i;
1602         struct ctxt_priv *priv;
1603         struct sec_flow_context *flc;
1604
1605         PMD_INIT_FUNC_TRACE();
1606
1607         /* For SEC CIPHER only one descriptor is required. */
1608         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1609                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1610                         RTE_CACHE_LINE_SIZE);
1611         if (priv == NULL) {
1612                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1613                 return -1;
1614         }
1615
1616         priv->fle_pool = dev_priv->fle_pool;
1617
1618         flc = &priv->flc_desc[0].flc;
1619
1620         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1621                         RTE_CACHE_LINE_SIZE);
1622         if (session->cipher_key.data == NULL) {
1623                 DPAA2_SEC_ERR("No Memory for cipher key");
1624                 rte_free(priv);
1625                 return -1;
1626         }
1627         session->cipher_key.length = xform->cipher.key.length;
1628
1629         memcpy(session->cipher_key.data, xform->cipher.key.data,
1630                xform->cipher.key.length);
1631         cipherdata.key = (size_t)session->cipher_key.data;
1632         cipherdata.keylen = session->cipher_key.length;
1633         cipherdata.key_enc_flags = 0;
1634         cipherdata.key_type = RTA_DATA_IMM;
1635
1636         /* Set IV parameters */
1637         session->iv.offset = xform->cipher.iv.offset;
1638         session->iv.length = xform->cipher.iv.length;
1639
1640         switch (xform->cipher.algo) {
1641         case RTE_CRYPTO_CIPHER_AES_CBC:
1642                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1643                 cipherdata.algmode = OP_ALG_AAI_CBC;
1644                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1645                 break;
1646         case RTE_CRYPTO_CIPHER_3DES_CBC:
1647                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1648                 cipherdata.algmode = OP_ALG_AAI_CBC;
1649                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1650                 break;
1651         case RTE_CRYPTO_CIPHER_AES_CTR:
1652                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1653                 cipherdata.algmode = OP_ALG_AAI_CTR;
1654                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1655                 break;
1656         case RTE_CRYPTO_CIPHER_3DES_CTR:
1657         case RTE_CRYPTO_CIPHER_AES_ECB:
1658         case RTE_CRYPTO_CIPHER_3DES_ECB:
1659         case RTE_CRYPTO_CIPHER_AES_XTS:
1660         case RTE_CRYPTO_CIPHER_AES_F8:
1661         case RTE_CRYPTO_CIPHER_ARC4:
1662         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1663         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1664         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1665         case RTE_CRYPTO_CIPHER_NULL:
1666                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1667                         xform->cipher.algo);
1668                 goto error_out;
1669         default:
1670                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1671                         xform->cipher.algo);
1672                 goto error_out;
1673         }
1674         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1675                                 DIR_ENC : DIR_DEC;
1676
1677         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1678                                         &cipherdata, NULL, session->iv.length,
1679                                         session->dir);
1680         if (bufsize < 0) {
1681                 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1682                 goto error_out;
1683         }
1684
1685         flc->word1_sdl = (uint8_t)bufsize;
1686         session->ctxt = priv;
1687
1688         for (i = 0; i < bufsize; i++)
1689                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1690
1691         return 0;
1692
1693 error_out:
1694         rte_free(session->cipher_key.data);
1695         rte_free(priv);
1696         return -1;
1697 }
1698
1699 static int
1700 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1701                     struct rte_crypto_sym_xform *xform,
1702                     dpaa2_sec_session *session)
1703 {
1704         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1705         struct alginfo authdata;
1706         int bufsize, i;
1707         struct ctxt_priv *priv;
1708         struct sec_flow_context *flc;
1709
1710         PMD_INIT_FUNC_TRACE();
1711
1712         /* For SEC AUTH three descriptors are required for various stages */
1713         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1714                         sizeof(struct ctxt_priv) + 3 *
1715                         sizeof(struct sec_flc_desc),
1716                         RTE_CACHE_LINE_SIZE);
1717         if (priv == NULL) {
1718                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1719                 return -1;
1720         }
1721
1722         priv->fle_pool = dev_priv->fle_pool;
1723         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1724
1725         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1726                         RTE_CACHE_LINE_SIZE);
1727         if (session->auth_key.data == NULL) {
1728                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1729                 rte_free(priv);
1730                 return -1;
1731         }
1732         session->auth_key.length = xform->auth.key.length;
1733
1734         memcpy(session->auth_key.data, xform->auth.key.data,
1735                xform->auth.key.length);
1736         authdata.key = (size_t)session->auth_key.data;
1737         authdata.keylen = session->auth_key.length;
1738         authdata.key_enc_flags = 0;
1739         authdata.key_type = RTA_DATA_IMM;
1740
1741         session->digest_length = xform->auth.digest_length;
1742
1743         switch (xform->auth.algo) {
1744         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1745                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1746                 authdata.algmode = OP_ALG_AAI_HMAC;
1747                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1748                 break;
1749         case RTE_CRYPTO_AUTH_MD5_HMAC:
1750                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1751                 authdata.algmode = OP_ALG_AAI_HMAC;
1752                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1753                 break;
1754         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1755                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1756                 authdata.algmode = OP_ALG_AAI_HMAC;
1757                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1758                 break;
1759         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1760                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1761                 authdata.algmode = OP_ALG_AAI_HMAC;
1762                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1763                 break;
1764         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1765                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1766                 authdata.algmode = OP_ALG_AAI_HMAC;
1767                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1768                 break;
1769         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1770                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1771                 authdata.algmode = OP_ALG_AAI_HMAC;
1772                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1773                 break;
1774         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1775         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1776         case RTE_CRYPTO_AUTH_NULL:
1777         case RTE_CRYPTO_AUTH_SHA1:
1778         case RTE_CRYPTO_AUTH_SHA256:
1779         case RTE_CRYPTO_AUTH_SHA512:
1780         case RTE_CRYPTO_AUTH_SHA224:
1781         case RTE_CRYPTO_AUTH_SHA384:
1782         case RTE_CRYPTO_AUTH_MD5:
1783         case RTE_CRYPTO_AUTH_AES_GMAC:
1784         case RTE_CRYPTO_AUTH_KASUMI_F9:
1785         case RTE_CRYPTO_AUTH_AES_CMAC:
1786         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1787         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1788                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1789                               xform->auth.algo);
1790                 goto error_out;
1791         default:
1792                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1793                               xform->auth.algo);
1794                 goto error_out;
1795         }
1796         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1797                                 DIR_ENC : DIR_DEC;
1798
1799         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1800                                    1, 0, SHR_NEVER, &authdata, !session->dir,
1801                                    session->digest_length);
1802         if (bufsize < 0) {
1803                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1804                 goto error_out;
1805         }
1806
1807         flc->word1_sdl = (uint8_t)bufsize;
1808         session->ctxt = priv;
1809         for (i = 0; i < bufsize; i++)
1810                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1811                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1812
1813
1814         return 0;
1815
1816 error_out:
1817         rte_free(session->auth_key.data);
1818         rte_free(priv);
1819         return -1;
1820 }
1821
1822 static int
1823 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1824                     struct rte_crypto_sym_xform *xform,
1825                     dpaa2_sec_session *session)
1826 {
1827         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1828         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1829         struct alginfo aeaddata;
1830         int bufsize, i;
1831         struct ctxt_priv *priv;
1832         struct sec_flow_context *flc;
1833         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1834         int err;
1835
1836         PMD_INIT_FUNC_TRACE();
1837
1838         /* Set IV parameters */
1839         session->iv.offset = aead_xform->iv.offset;
1840         session->iv.length = aead_xform->iv.length;
1841         session->ctxt_type = DPAA2_SEC_AEAD;
1842
1843         /* For SEC AEAD only one descriptor is required */
1844         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1845                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1846                         RTE_CACHE_LINE_SIZE);
1847         if (priv == NULL) {
1848                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1849                 return -1;
1850         }
1851
1852         priv->fle_pool = dev_priv->fle_pool;
1853         flc = &priv->flc_desc[0].flc;
1854
1855         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1856                                                RTE_CACHE_LINE_SIZE);
1857         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1858                 DPAA2_SEC_ERR("No Memory for aead key");
1859                 rte_free(priv);
1860                 return -1;
1861         }
1862         memcpy(session->aead_key.data, aead_xform->key.data,
1863                aead_xform->key.length);
1864
1865         session->digest_length = aead_xform->digest_length;
1866         session->aead_key.length = aead_xform->key.length;
1867         ctxt->auth_only_len = aead_xform->aad_length;
1868
1869         aeaddata.key = (size_t)session->aead_key.data;
1870         aeaddata.keylen = session->aead_key.length;
1871         aeaddata.key_enc_flags = 0;
1872         aeaddata.key_type = RTA_DATA_IMM;
1873
1874         switch (aead_xform->algo) {
1875         case RTE_CRYPTO_AEAD_AES_GCM:
1876                 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1877                 aeaddata.algmode = OP_ALG_AAI_GCM;
1878                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1879                 break;
1880         case RTE_CRYPTO_AEAD_AES_CCM:
1881                 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1882                               aead_xform->algo);
1883                 goto error_out;
1884         default:
1885                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1886                               aead_xform->algo);
1887                 goto error_out;
1888         }
1889         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1890                                 DIR_ENC : DIR_DEC;
1891
1892         priv->flc_desc[0].desc[0] = aeaddata.keylen;
1893         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1894                                MIN_JOB_DESC_SIZE,
1895                                (unsigned int *)priv->flc_desc[0].desc,
1896                                &priv->flc_desc[0].desc[1], 1);
1897
1898         if (err < 0) {
1899                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1900                 goto error_out;
1901         }
1902         if (priv->flc_desc[0].desc[1] & 1) {
1903                 aeaddata.key_type = RTA_DATA_IMM;
1904         } else {
1905                 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1906                 aeaddata.key_type = RTA_DATA_PTR;
1907         }
1908         priv->flc_desc[0].desc[0] = 0;
1909         priv->flc_desc[0].desc[1] = 0;
1910
1911         if (session->dir == DIR_ENC)
1912                 bufsize = cnstr_shdsc_gcm_encap(
1913                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1914                                 &aeaddata, session->iv.length,
1915                                 session->digest_length);
1916         else
1917                 bufsize = cnstr_shdsc_gcm_decap(
1918                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1919                                 &aeaddata, session->iv.length,
1920                                 session->digest_length);
1921         if (bufsize < 0) {
1922                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1923                 goto error_out;
1924         }
1925
1926         flc->word1_sdl = (uint8_t)bufsize;
1927         session->ctxt = priv;
1928         for (i = 0; i < bufsize; i++)
1929                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1930                             i, priv->flc_desc[0].desc[i]);
1931
1932         return 0;
1933
1934 error_out:
1935         rte_free(session->aead_key.data);
1936         rte_free(priv);
1937         return -1;
1938 }
1939
1940
1941 static int
1942 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1943                     struct rte_crypto_sym_xform *xform,
1944                     dpaa2_sec_session *session)
1945 {
1946         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1947         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1948         struct alginfo authdata, cipherdata;
1949         int bufsize, i;
1950         struct ctxt_priv *priv;
1951         struct sec_flow_context *flc;
1952         struct rte_crypto_cipher_xform *cipher_xform;
1953         struct rte_crypto_auth_xform *auth_xform;
1954         int err;
1955
1956         PMD_INIT_FUNC_TRACE();
1957
1958         if (session->ext_params.aead_ctxt.auth_cipher_text) {
1959                 cipher_xform = &xform->cipher;
1960                 auth_xform = &xform->next->auth;
1961                 session->ctxt_type =
1962                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1963                         DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1964         } else {
1965                 cipher_xform = &xform->next->cipher;
1966                 auth_xform = &xform->auth;
1967                 session->ctxt_type =
1968                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1969                         DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1970         }
1971
1972         /* Set IV parameters */
1973         session->iv.offset = cipher_xform->iv.offset;
1974         session->iv.length = cipher_xform->iv.length;
1975
1976         /* For SEC AEAD only one descriptor is required */
1977         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1978                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1979                         RTE_CACHE_LINE_SIZE);
1980         if (priv == NULL) {
1981                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1982                 return -1;
1983         }
1984
1985         priv->fle_pool = dev_priv->fle_pool;
1986         flc = &priv->flc_desc[0].flc;
1987
1988         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1989                                                RTE_CACHE_LINE_SIZE);
1990         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1991                 DPAA2_SEC_ERR("No Memory for cipher key");
1992                 rte_free(priv);
1993                 return -1;
1994         }
1995         session->cipher_key.length = cipher_xform->key.length;
1996         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1997                                              RTE_CACHE_LINE_SIZE);
1998         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1999                 DPAA2_SEC_ERR("No Memory for auth key");
2000                 rte_free(session->cipher_key.data);
2001                 rte_free(priv);
2002                 return -1;
2003         }
2004         session->auth_key.length = auth_xform->key.length;
2005         memcpy(session->cipher_key.data, cipher_xform->key.data,
2006                cipher_xform->key.length);
2007         memcpy(session->auth_key.data, auth_xform->key.data,
2008                auth_xform->key.length);
2009
2010         authdata.key = (size_t)session->auth_key.data;
2011         authdata.keylen = session->auth_key.length;
2012         authdata.key_enc_flags = 0;
2013         authdata.key_type = RTA_DATA_IMM;
2014
2015         session->digest_length = auth_xform->digest_length;
2016
2017         switch (auth_xform->algo) {
2018         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2019                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2020                 authdata.algmode = OP_ALG_AAI_HMAC;
2021                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2022                 break;
2023         case RTE_CRYPTO_AUTH_MD5_HMAC:
2024                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2025                 authdata.algmode = OP_ALG_AAI_HMAC;
2026                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2027                 break;
2028         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2029                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2030                 authdata.algmode = OP_ALG_AAI_HMAC;
2031                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2032                 break;
2033         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2034                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2035                 authdata.algmode = OP_ALG_AAI_HMAC;
2036                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2037                 break;
2038         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2039                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2040                 authdata.algmode = OP_ALG_AAI_HMAC;
2041                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2042                 break;
2043         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2044                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2045                 authdata.algmode = OP_ALG_AAI_HMAC;
2046                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2047                 break;
2048         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2049         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2050         case RTE_CRYPTO_AUTH_NULL:
2051         case RTE_CRYPTO_AUTH_SHA1:
2052         case RTE_CRYPTO_AUTH_SHA256:
2053         case RTE_CRYPTO_AUTH_SHA512:
2054         case RTE_CRYPTO_AUTH_SHA224:
2055         case RTE_CRYPTO_AUTH_SHA384:
2056         case RTE_CRYPTO_AUTH_MD5:
2057         case RTE_CRYPTO_AUTH_AES_GMAC:
2058         case RTE_CRYPTO_AUTH_KASUMI_F9:
2059         case RTE_CRYPTO_AUTH_AES_CMAC:
2060         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2061         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2062                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2063                               auth_xform->algo);
2064                 goto error_out;
2065         default:
2066                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2067                               auth_xform->algo);
2068                 goto error_out;
2069         }
2070         cipherdata.key = (size_t)session->cipher_key.data;
2071         cipherdata.keylen = session->cipher_key.length;
2072         cipherdata.key_enc_flags = 0;
2073         cipherdata.key_type = RTA_DATA_IMM;
2074
2075         switch (cipher_xform->algo) {
2076         case RTE_CRYPTO_CIPHER_AES_CBC:
2077                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2078                 cipherdata.algmode = OP_ALG_AAI_CBC;
2079                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2080                 break;
2081         case RTE_CRYPTO_CIPHER_3DES_CBC:
2082                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2083                 cipherdata.algmode = OP_ALG_AAI_CBC;
2084                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2085                 break;
2086         case RTE_CRYPTO_CIPHER_AES_CTR:
2087                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2088                 cipherdata.algmode = OP_ALG_AAI_CTR;
2089                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2090                 break;
2091         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2092         case RTE_CRYPTO_CIPHER_NULL:
2093         case RTE_CRYPTO_CIPHER_3DES_ECB:
2094         case RTE_CRYPTO_CIPHER_AES_ECB:
2095         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2096                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2097                               cipher_xform->algo);
2098                 goto error_out;
2099         default:
2100                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2101                               cipher_xform->algo);
2102                 goto error_out;
2103         }
2104         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2105                                 DIR_ENC : DIR_DEC;
2106
2107         priv->flc_desc[0].desc[0] = cipherdata.keylen;
2108         priv->flc_desc[0].desc[1] = authdata.keylen;
2109         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2110                                MIN_JOB_DESC_SIZE,
2111                                (unsigned int *)priv->flc_desc[0].desc,
2112                                &priv->flc_desc[0].desc[2], 2);
2113
2114         if (err < 0) {
2115                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2116                 goto error_out;
2117         }
2118         if (priv->flc_desc[0].desc[2] & 1) {
2119                 cipherdata.key_type = RTA_DATA_IMM;
2120         } else {
2121                 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2122                 cipherdata.key_type = RTA_DATA_PTR;
2123         }
2124         if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2125                 authdata.key_type = RTA_DATA_IMM;
2126         } else {
2127                 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2128                 authdata.key_type = RTA_DATA_PTR;
2129         }
2130         priv->flc_desc[0].desc[0] = 0;
2131         priv->flc_desc[0].desc[1] = 0;
2132         priv->flc_desc[0].desc[2] = 0;
2133
2134         if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2135                 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2136                                               0, SHR_SERIAL,
2137                                               &cipherdata, &authdata,
2138                                               session->iv.length,
2139                                               ctxt->auth_only_len,
2140                                               session->digest_length,
2141                                               session->dir);
2142                 if (bufsize < 0) {
2143                         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2144                         goto error_out;
2145                 }
2146         } else {
2147                 DPAA2_SEC_ERR("Hash before cipher not supported");
2148                 goto error_out;
2149         }
2150
2151         flc->word1_sdl = (uint8_t)bufsize;
2152         session->ctxt = priv;
2153         for (i = 0; i < bufsize; i++)
2154                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2155                             i, priv->flc_desc[0].desc[i]);
2156
2157         return 0;
2158
2159 error_out:
2160         rte_free(session->cipher_key.data);
2161         rte_free(session->auth_key.data);
2162         rte_free(priv);
2163         return -1;
2164 }
2165
2166 static int
2167 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2168                             struct rte_crypto_sym_xform *xform, void *sess)
2169 {
2170         dpaa2_sec_session *session = sess;
2171         int ret;
2172
2173         PMD_INIT_FUNC_TRACE();
2174
2175         if (unlikely(sess == NULL)) {
2176                 DPAA2_SEC_ERR("Invalid session struct");
2177                 return -1;
2178         }
2179
2180         memset(session, 0, sizeof(dpaa2_sec_session));
2181         /* Default IV length = 0 */
2182         session->iv.length = 0;
2183
2184         /* Cipher Only */
2185         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2186                 session->ctxt_type = DPAA2_SEC_CIPHER;
2187                 ret = dpaa2_sec_cipher_init(dev, xform, session);
2188
2189         /* Authentication Only */
2190         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2191                    xform->next == NULL) {
2192                 session->ctxt_type = DPAA2_SEC_AUTH;
2193                 ret = dpaa2_sec_auth_init(dev, xform, session);
2194
2195         /* Cipher then Authenticate */
2196         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2197                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2198                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2199                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2200
2201         /* Authenticate then Cipher */
2202         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2203                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2204                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2205                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2206
2207         /* AEAD operation for AES-GCM kind of Algorithms */
2208         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2209                    xform->next == NULL) {
2210                 ret = dpaa2_sec_aead_init(dev, xform, session);
2211
2212         } else {
2213                 DPAA2_SEC_ERR("Invalid crypto type");
2214                 return -EINVAL;
2215         }
2216
2217         return ret;
2218 }
2219
2220 static int
2221 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2222                         dpaa2_sec_session *session,
2223                         struct alginfo *aeaddata)
2224 {
2225         PMD_INIT_FUNC_TRACE();
2226
2227         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2228                                                RTE_CACHE_LINE_SIZE);
2229         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2230                 DPAA2_SEC_ERR("No Memory for aead key");
2231                 return -1;
2232         }
2233         memcpy(session->aead_key.data, aead_xform->key.data,
2234                aead_xform->key.length);
2235
2236         session->digest_length = aead_xform->digest_length;
2237         session->aead_key.length = aead_xform->key.length;
2238
2239         aeaddata->key = (size_t)session->aead_key.data;
2240         aeaddata->keylen = session->aead_key.length;
2241         aeaddata->key_enc_flags = 0;
2242         aeaddata->key_type = RTA_DATA_IMM;
2243
2244         switch (aead_xform->algo) {
2245         case RTE_CRYPTO_AEAD_AES_GCM:
2246                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2247                 aeaddata->algmode = OP_ALG_AAI_GCM;
2248                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2249                 break;
2250         case RTE_CRYPTO_AEAD_AES_CCM:
2251                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2252                 aeaddata->algmode = OP_ALG_AAI_CCM;
2253                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2254                 break;
2255         default:
2256                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2257                               aead_xform->algo);
2258                 return -1;
2259         }
2260         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2261                                 DIR_ENC : DIR_DEC;
2262
2263         return 0;
2264 }
2265
2266 static int
2267 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2268         struct rte_crypto_auth_xform *auth_xform,
2269         dpaa2_sec_session *session,
2270         struct alginfo *cipherdata,
2271         struct alginfo *authdata)
2272 {
2273         if (cipher_xform) {
2274                 session->cipher_key.data = rte_zmalloc(NULL,
2275                                                        cipher_xform->key.length,
2276                                                        RTE_CACHE_LINE_SIZE);
2277                 if (session->cipher_key.data == NULL &&
2278                                 cipher_xform->key.length > 0) {
2279                         DPAA2_SEC_ERR("No Memory for cipher key");
2280                         return -ENOMEM;
2281                 }
2282
2283                 session->cipher_key.length = cipher_xform->key.length;
2284                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2285                                 cipher_xform->key.length);
2286                 session->cipher_alg = cipher_xform->algo;
2287         } else {
2288                 session->cipher_key.data = NULL;
2289                 session->cipher_key.length = 0;
2290                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2291         }
2292
2293         if (auth_xform) {
2294                 session->auth_key.data = rte_zmalloc(NULL,
2295                                                 auth_xform->key.length,
2296                                                 RTE_CACHE_LINE_SIZE);
2297                 if (session->auth_key.data == NULL &&
2298                                 auth_xform->key.length > 0) {
2299                         DPAA2_SEC_ERR("No Memory for auth key");
2300                         return -ENOMEM;
2301                 }
2302                 session->auth_key.length = auth_xform->key.length;
2303                 memcpy(session->auth_key.data, auth_xform->key.data,
2304                                 auth_xform->key.length);
2305                 session->auth_alg = auth_xform->algo;
2306         } else {
2307                 session->auth_key.data = NULL;
2308                 session->auth_key.length = 0;
2309                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2310         }
2311
2312         authdata->key = (size_t)session->auth_key.data;
2313         authdata->keylen = session->auth_key.length;
2314         authdata->key_enc_flags = 0;
2315         authdata->key_type = RTA_DATA_IMM;
2316         switch (session->auth_alg) {
2317         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2318                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2319                 authdata->algmode = OP_ALG_AAI_HMAC;
2320                 break;
2321         case RTE_CRYPTO_AUTH_MD5_HMAC:
2322                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2323                 authdata->algmode = OP_ALG_AAI_HMAC;
2324                 break;
2325         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2326                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2327                 authdata->algmode = OP_ALG_AAI_HMAC;
2328                 break;
2329         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2330                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2331                 authdata->algmode = OP_ALG_AAI_HMAC;
2332                 break;
2333         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2334                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2335                 authdata->algmode = OP_ALG_AAI_HMAC;
2336                 break;
2337         case RTE_CRYPTO_AUTH_AES_CMAC:
2338                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2339                 break;
2340         case RTE_CRYPTO_AUTH_NULL:
2341                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2342                 break;
2343         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2344         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2345         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2346         case RTE_CRYPTO_AUTH_SHA1:
2347         case RTE_CRYPTO_AUTH_SHA256:
2348         case RTE_CRYPTO_AUTH_SHA512:
2349         case RTE_CRYPTO_AUTH_SHA224:
2350         case RTE_CRYPTO_AUTH_SHA384:
2351         case RTE_CRYPTO_AUTH_MD5:
2352         case RTE_CRYPTO_AUTH_AES_GMAC:
2353         case RTE_CRYPTO_AUTH_KASUMI_F9:
2354         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2355         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2356                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2357                               session->auth_alg);
2358                 return -1;
2359         default:
2360                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2361                               session->auth_alg);
2362                 return -1;
2363         }
2364         cipherdata->key = (size_t)session->cipher_key.data;
2365         cipherdata->keylen = session->cipher_key.length;
2366         cipherdata->key_enc_flags = 0;
2367         cipherdata->key_type = RTA_DATA_IMM;
2368
2369         switch (session->cipher_alg) {
2370         case RTE_CRYPTO_CIPHER_AES_CBC:
2371                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2372                 cipherdata->algmode = OP_ALG_AAI_CBC;
2373                 break;
2374         case RTE_CRYPTO_CIPHER_3DES_CBC:
2375                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2376                 cipherdata->algmode = OP_ALG_AAI_CBC;
2377                 break;
2378         case RTE_CRYPTO_CIPHER_AES_CTR:
2379                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2380                 cipherdata->algmode = OP_ALG_AAI_CTR;
2381                 break;
2382         case RTE_CRYPTO_CIPHER_NULL:
2383                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2384                 break;
2385         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2386         case RTE_CRYPTO_CIPHER_3DES_ECB:
2387         case RTE_CRYPTO_CIPHER_AES_ECB:
2388         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2389                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2390                               session->cipher_alg);
2391                 return -1;
2392         default:
2393                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2394                               session->cipher_alg);
2395                 return -1;
2396         }
2397
2398         return 0;
2399 }
2400
#ifdef RTE_LIBRTE_SECURITY_TEST
/* Fixed 16-byte AES-CBC IV, compiled in only for the security test build
 * (RTE_LIBRTE_SECURITY_TEST).
 */
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
2406
2407 static int
2408 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2409                             struct rte_security_session_conf *conf,
2410                             void *sess)
2411 {
2412         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2413         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2414         struct rte_crypto_auth_xform *auth_xform = NULL;
2415         struct rte_crypto_aead_xform *aead_xform = NULL;
2416         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2417         struct ctxt_priv *priv;
2418         struct ipsec_encap_pdb encap_pdb;
2419         struct ipsec_decap_pdb decap_pdb;
2420         struct alginfo authdata, cipherdata;
2421         int bufsize;
2422         struct sec_flow_context *flc;
2423         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2424         int ret = -1;
2425
2426         PMD_INIT_FUNC_TRACE();
2427
2428         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2429                                 sizeof(struct ctxt_priv) +
2430                                 sizeof(struct sec_flc_desc),
2431                                 RTE_CACHE_LINE_SIZE);
2432
2433         if (priv == NULL) {
2434                 DPAA2_SEC_ERR("No memory for priv CTXT");
2435                 return -ENOMEM;
2436         }
2437
2438         priv->fle_pool = dev_priv->fle_pool;
2439         flc = &priv->flc_desc[0].flc;
2440
2441         memset(session, 0, sizeof(dpaa2_sec_session));
2442
2443         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2444                 cipher_xform = &conf->crypto_xform->cipher;
2445                 if (conf->crypto_xform->next)
2446                         auth_xform = &conf->crypto_xform->next->auth;
2447                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2448                                         session, &cipherdata, &authdata);
2449         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2450                 auth_xform = &conf->crypto_xform->auth;
2451                 if (conf->crypto_xform->next)
2452                         cipher_xform = &conf->crypto_xform->next->cipher;
2453                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2454                                         session, &cipherdata, &authdata);
2455         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2456                 aead_xform = &conf->crypto_xform->aead;
2457                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2458                                         session, &cipherdata);
2459         } else {
2460                 DPAA2_SEC_ERR("XFORM not specified");
2461                 ret = -EINVAL;
2462                 goto out;
2463         }
2464         if (ret) {
2465                 DPAA2_SEC_ERR("Failed to process xform");
2466                 goto out;
2467         }
2468
2469         session->ctxt_type = DPAA2_SEC_IPSEC;
2470         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2471                 uint8_t *hdr = NULL;
2472                 struct ip ip4_hdr;
2473                 struct rte_ipv6_hdr ip6_hdr;
2474
2475                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2476                 /* For Sec Proto only one descriptor is required. */
2477                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2478                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2479                         PDBOPTS_ESP_OIHI_PDB_INL |
2480                         PDBOPTS_ESP_IVSRC |
2481                         PDBHMO_ESP_ENCAP_DTTL |
2482                         PDBHMO_ESP_SNR;
2483                 if (ipsec_xform->options.esn)
2484                         encap_pdb.options |= PDBOPTS_ESP_ESN;
2485                 encap_pdb.spi = ipsec_xform->spi;
2486                 session->dir = DIR_ENC;
2487                 if (ipsec_xform->tunnel.type ==
2488                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2489                         encap_pdb.ip_hdr_len = sizeof(struct ip);
2490                         ip4_hdr.ip_v = IPVERSION;
2491                         ip4_hdr.ip_hl = 5;
2492                         ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2493                         ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2494                         ip4_hdr.ip_id = 0;
2495                         ip4_hdr.ip_off = 0;
2496                         ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2497                         ip4_hdr.ip_p = IPPROTO_ESP;
2498                         ip4_hdr.ip_sum = 0;
2499                         ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2500                         ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2501                         ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2502                                         &ip4_hdr, sizeof(struct ip));
2503                         hdr = (uint8_t *)&ip4_hdr;
2504                 } else if (ipsec_xform->tunnel.type ==
2505                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2506                         ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2507                                 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2508                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2509                                         RTE_IPV6_HDR_TC_SHIFT) &
2510                                         RTE_IPV6_HDR_TC_MASK) |
2511                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2512                                         RTE_IPV6_HDR_FL_SHIFT) &
2513                                         RTE_IPV6_HDR_FL_MASK));
2514                         /* Payload length will be updated by HW */
2515                         ip6_hdr.payload_len = 0;
2516                         ip6_hdr.hop_limits =
2517                                         ipsec_xform->tunnel.ipv6.hlimit;
2518                         ip6_hdr.proto = (ipsec_xform->proto ==
2519                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2520                                         IPPROTO_ESP : IPPROTO_AH;
2521                         memcpy(&ip6_hdr.src_addr,
2522                                 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2523                         memcpy(&ip6_hdr.dst_addr,
2524                                 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2525                         encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2526                         hdr = (uint8_t *)&ip6_hdr;
2527                 }
2528
2529                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2530                                 1, 0, SHR_SERIAL, &encap_pdb,
2531                                 hdr, &cipherdata, &authdata);
2532         } else if (ipsec_xform->direction ==
2533                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2534                 flc->dhr = SEC_FLC_DHR_INBOUND;
2535                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2536                 decap_pdb.options = sizeof(struct ip) << 16;
2537                 if (ipsec_xform->options.esn)
2538                         decap_pdb.options |= PDBOPTS_ESP_ESN;
2539                 decap_pdb.options = (ipsec_xform->tunnel.type ==
2540                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2541                                 sizeof(struct ip) << 16 :
2542                                 sizeof(struct rte_ipv6_hdr) << 16;
2543                 session->dir = DIR_DEC;
2544                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2545                                 1, 0, SHR_SERIAL,
2546                                 &decap_pdb, &cipherdata, &authdata);
2547         } else
2548                 goto out;
2549
2550         if (bufsize < 0) {
2551                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2552                 goto out;
2553         }
2554
2555         flc->word1_sdl = (uint8_t)bufsize;
2556
2557         /* Enable the stashing control bit */
2558         DPAA2_SET_FLC_RSC(flc);
2559         flc->word2_rflc_31_0 = lower_32_bits(
2560                         (size_t)&(((struct dpaa2_sec_qp *)
2561                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2562         flc->word3_rflc_63_32 = upper_32_bits(
2563                         (size_t)&(((struct dpaa2_sec_qp *)
2564                         dev->data->queue_pairs[0])->rx_vq));
2565
2566         /* Set EWS bit i.e. enable write-safe */
2567         DPAA2_SET_FLC_EWS(flc);
2568         /* Set BS = 1 i.e reuse input buffers as output buffers */
2569         DPAA2_SET_FLC_REUSE_BS(flc);
2570         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2571         DPAA2_SET_FLC_REUSE_FF(flc);
2572
2573         session->ctxt = priv;
2574
2575         return 0;
2576 out:
2577         rte_free(session->auth_key.data);
2578         rte_free(session->cipher_key.data);
2579         rte_free(priv);
2580         return ret;
2581 }
2582
2583 static int
2584 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2585                            struct rte_security_session_conf *conf,
2586                            void *sess)
2587 {
2588         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2589         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2590         struct rte_crypto_auth_xform *auth_xform = NULL;
2591         struct rte_crypto_cipher_xform *cipher_xform;
2592         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2593         struct ctxt_priv *priv;
2594         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2595         struct alginfo authdata, cipherdata;
2596         struct alginfo *p_authdata = NULL;
2597         int bufsize = -1;
2598         struct sec_flow_context *flc;
2599 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2600         int swap = true;
2601 #else
2602         int swap = false;
2603 #endif
2604
2605         PMD_INIT_FUNC_TRACE();
2606
2607         memset(session, 0, sizeof(dpaa2_sec_session));
2608
2609         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2610                                 sizeof(struct ctxt_priv) +
2611                                 sizeof(struct sec_flc_desc),
2612                                 RTE_CACHE_LINE_SIZE);
2613
2614         if (priv == NULL) {
2615                 DPAA2_SEC_ERR("No memory for priv CTXT");
2616                 return -ENOMEM;
2617         }
2618
2619         priv->fle_pool = dev_priv->fle_pool;
2620         flc = &priv->flc_desc[0].flc;
2621
2622         /* find xfrm types */
2623         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2624                 cipher_xform = &xform->cipher;
2625         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2626                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2627                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2628                 cipher_xform = &xform->cipher;
2629                 auth_xform = &xform->next->auth;
2630         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2631                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2632                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2633                 cipher_xform = &xform->next->cipher;
2634                 auth_xform = &xform->auth;
2635         } else {
2636                 DPAA2_SEC_ERR("Invalid crypto type");
2637                 return -EINVAL;
2638         }
2639
2640         session->ctxt_type = DPAA2_SEC_PDCP;
2641         if (cipher_xform) {
2642                 session->cipher_key.data = rte_zmalloc(NULL,
2643                                                cipher_xform->key.length,
2644                                                RTE_CACHE_LINE_SIZE);
2645                 if (session->cipher_key.data == NULL &&
2646                                 cipher_xform->key.length > 0) {
2647                         DPAA2_SEC_ERR("No Memory for cipher key");
2648                         rte_free(priv);
2649                         return -ENOMEM;
2650                 }
2651                 session->cipher_key.length = cipher_xform->key.length;
2652                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2653                         cipher_xform->key.length);
2654                 session->dir =
2655                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2656                                         DIR_ENC : DIR_DEC;
2657                 session->cipher_alg = cipher_xform->algo;
2658         } else {
2659                 session->cipher_key.data = NULL;
2660                 session->cipher_key.length = 0;
2661                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2662                 session->dir = DIR_ENC;
2663         }
2664
2665         session->pdcp.domain = pdcp_xform->domain;
2666         session->pdcp.bearer = pdcp_xform->bearer;
2667         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2668         session->pdcp.sn_size = pdcp_xform->sn_size;
2669         session->pdcp.hfn = pdcp_xform->hfn;
2670         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2671         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2672         /* hfv ovd offset location is stored in iv.offset value*/
2673         session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2674
2675         cipherdata.key = (size_t)session->cipher_key.data;
2676         cipherdata.keylen = session->cipher_key.length;
2677         cipherdata.key_enc_flags = 0;
2678         cipherdata.key_type = RTA_DATA_IMM;
2679
2680         switch (session->cipher_alg) {
2681         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2682                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2683                 break;
2684         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2685                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2686                 break;
2687         case RTE_CRYPTO_CIPHER_AES_CTR:
2688                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2689                 break;
2690         case RTE_CRYPTO_CIPHER_NULL:
2691                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2692                 break;
2693         default:
2694                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2695                               session->cipher_alg);
2696                 goto out;
2697         }
2698
2699         if (auth_xform) {
2700                 session->auth_key.data = rte_zmalloc(NULL,
2701                                                      auth_xform->key.length,
2702                                                      RTE_CACHE_LINE_SIZE);
2703                 if (!session->auth_key.data &&
2704                     auth_xform->key.length > 0) {
2705                         DPAA2_SEC_ERR("No Memory for auth key");
2706                         rte_free(session->cipher_key.data);
2707                         rte_free(priv);
2708                         return -ENOMEM;
2709                 }
2710                 session->auth_key.length = auth_xform->key.length;
2711                 memcpy(session->auth_key.data, auth_xform->key.data,
2712                        auth_xform->key.length);
2713                 session->auth_alg = auth_xform->algo;
2714         } else {
2715                 session->auth_key.data = NULL;
2716                 session->auth_key.length = 0;
2717                 session->auth_alg = 0;
2718         }
2719         authdata.key = (size_t)session->auth_key.data;
2720         authdata.keylen = session->auth_key.length;
2721         authdata.key_enc_flags = 0;
2722         authdata.key_type = RTA_DATA_IMM;
2723
2724         if (session->auth_alg) {
2725                 switch (session->auth_alg) {
2726                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2727                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2728                         break;
2729                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2730                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
2731                         break;
2732                 case RTE_CRYPTO_AUTH_AES_CMAC:
2733                         authdata.algtype = PDCP_AUTH_TYPE_AES;
2734                         break;
2735                 case RTE_CRYPTO_AUTH_NULL:
2736                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
2737                         break;
2738                 default:
2739                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2740                                       session->auth_alg);
2741                         goto out;
2742                 }
2743
2744                 p_authdata = &authdata;
2745         } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2746                 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
2747                 goto out;
2748         }
2749
2750         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2751                 if (session->dir == DIR_ENC)
2752                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2753                                         priv->flc_desc[0].desc, 1, swap,
2754                                         pdcp_xform->hfn,
2755                                         session->pdcp.sn_size,
2756                                         pdcp_xform->bearer,
2757                                         pdcp_xform->pkt_dir,
2758                                         pdcp_xform->hfn_threshold,
2759                                         &cipherdata, &authdata,
2760                                         0);
2761                 else if (session->dir == DIR_DEC)
2762                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2763                                         priv->flc_desc[0].desc, 1, swap,
2764                                         pdcp_xform->hfn,
2765                                         session->pdcp.sn_size,
2766                                         pdcp_xform->bearer,
2767                                         pdcp_xform->pkt_dir,
2768                                         pdcp_xform->hfn_threshold,
2769                                         &cipherdata, &authdata,
2770                                         0);
2771         } else {
2772                 if (session->dir == DIR_ENC)
2773                         bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2774                                         priv->flc_desc[0].desc, 1, swap,
2775                                         session->pdcp.sn_size,
2776                                         pdcp_xform->hfn,
2777                                         pdcp_xform->bearer,
2778                                         pdcp_xform->pkt_dir,
2779                                         pdcp_xform->hfn_threshold,
2780                                         &cipherdata, p_authdata, 0);
2781                 else if (session->dir == DIR_DEC)
2782                         bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2783                                         priv->flc_desc[0].desc, 1, swap,
2784                                         session->pdcp.sn_size,
2785                                         pdcp_xform->hfn,
2786                                         pdcp_xform->bearer,
2787                                         pdcp_xform->pkt_dir,
2788                                         pdcp_xform->hfn_threshold,
2789                                         &cipherdata, p_authdata, 0);
2790         }
2791
2792         if (bufsize < 0) {
2793                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2794                 goto out;
2795         }
2796
2797         /* Enable the stashing control bit */
2798         DPAA2_SET_FLC_RSC(flc);
2799         flc->word2_rflc_31_0 = lower_32_bits(
2800                         (size_t)&(((struct dpaa2_sec_qp *)
2801                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2802         flc->word3_rflc_63_32 = upper_32_bits(
2803                         (size_t)&(((struct dpaa2_sec_qp *)
2804                         dev->data->queue_pairs[0])->rx_vq));
2805
2806         flc->word1_sdl = (uint8_t)bufsize;
2807
2808         /* Set EWS bit i.e. enable write-safe */
2809         DPAA2_SET_FLC_EWS(flc);
2810         /* Set BS = 1 i.e reuse input buffers as output buffers */
2811         DPAA2_SET_FLC_REUSE_BS(flc);
2812         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2813         DPAA2_SET_FLC_REUSE_FF(flc);
2814
2815         session->ctxt = priv;
2816
2817         return 0;
2818 out:
2819         rte_free(session->auth_key.data);
2820         rte_free(session->cipher_key.data);
2821         rte_free(priv);
2822         return -1;
2823 }
2824
2825 static int
2826 dpaa2_sec_security_session_create(void *dev,
2827                                   struct rte_security_session_conf *conf,
2828                                   struct rte_security_session *sess,
2829                                   struct rte_mempool *mempool)
2830 {
2831         void *sess_private_data;
2832         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2833         int ret;
2834
2835         if (rte_mempool_get(mempool, &sess_private_data)) {
2836                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2837                 return -ENOMEM;
2838         }
2839
2840         switch (conf->protocol) {
2841         case RTE_SECURITY_PROTOCOL_IPSEC:
2842                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2843                                 sess_private_data);
2844                 break;
2845         case RTE_SECURITY_PROTOCOL_MACSEC:
2846                 return -ENOTSUP;
2847         case RTE_SECURITY_PROTOCOL_PDCP:
2848                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
2849                                 sess_private_data);
2850                 break;
2851         default:
2852                 return -EINVAL;
2853         }
2854         if (ret != 0) {
2855                 DPAA2_SEC_ERR("Failed to configure session parameters");
2856                 /* Return session to mempool */
2857                 rte_mempool_put(mempool, sess_private_data);
2858                 return ret;
2859         }
2860
2861         set_sec_session_private_data(sess, sess_private_data);
2862
2863         return ret;
2864 }
2865
2866 /** Clear the memory of session so it doesn't leave key material behind */
2867 static int
2868 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2869                 struct rte_security_session *sess)
2870 {
2871         PMD_INIT_FUNC_TRACE();
2872         void *sess_priv = get_sec_session_private_data(sess);
2873
2874         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2875
2876         if (sess_priv) {
2877                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2878
2879                 rte_free(s->ctxt);
2880                 rte_free(s->cipher_key.data);
2881                 rte_free(s->auth_key.data);
2882                 memset(s, 0, sizeof(dpaa2_sec_session));
2883                 set_sec_session_private_data(sess, NULL);
2884                 rte_mempool_put(sess_mp, sess_priv);
2885         }
2886         return 0;
2887 }
2888
2889 static int
2890 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2891                 struct rte_crypto_sym_xform *xform,
2892                 struct rte_cryptodev_sym_session *sess,
2893                 struct rte_mempool *mempool)
2894 {
2895         void *sess_private_data;
2896         int ret;
2897
2898         if (rte_mempool_get(mempool, &sess_private_data)) {
2899                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2900                 return -ENOMEM;
2901         }
2902
2903         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2904         if (ret != 0) {
2905                 DPAA2_SEC_ERR("Failed to configure session parameters");
2906                 /* Return session to mempool */
2907                 rte_mempool_put(mempool, sess_private_data);
2908                 return ret;
2909         }
2910
2911         set_sym_session_private_data(sess, dev->driver_id,
2912                 sess_private_data);
2913
2914         return 0;
2915 }
2916
2917 /** Clear the memory of session so it doesn't leave key material behind */
2918 static void
2919 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2920                 struct rte_cryptodev_sym_session *sess)
2921 {
2922         PMD_INIT_FUNC_TRACE();
2923         uint8_t index = dev->driver_id;
2924         void *sess_priv = get_sym_session_private_data(sess, index);
2925         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2926
2927         if (sess_priv) {
2928                 rte_free(s->ctxt);
2929                 rte_free(s->cipher_key.data);
2930                 rte_free(s->auth_key.data);
2931                 memset(s, 0, sizeof(dpaa2_sec_session));
2932                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2933                 set_sym_session_private_data(sess, index, NULL);
2934                 rte_mempool_put(sess_mp, sess_priv);
2935         }
2936 }
2937
2938 static int
2939 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2940                         struct rte_cryptodev_config *config __rte_unused)
2941 {
2942         PMD_INIT_FUNC_TRACE();
2943
2944         return 0;
2945 }
2946
2947 static int
2948 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2949 {
2950         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2951         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2952         struct dpseci_attr attr;
2953         struct dpaa2_queue *dpaa2_q;
2954         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2955                                         dev->data->queue_pairs;
2956         struct dpseci_rx_queue_attr rx_attr;
2957         struct dpseci_tx_queue_attr tx_attr;
2958         int ret, i;
2959
2960         PMD_INIT_FUNC_TRACE();
2961
2962         memset(&attr, 0, sizeof(struct dpseci_attr));
2963
2964         ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2965         if (ret) {
2966                 DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2967                               priv->hw_id);
2968                 goto get_attr_failure;
2969         }
2970         ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2971         if (ret) {
2972                 DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2973                 goto get_attr_failure;
2974         }
2975         for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2976                 dpaa2_q = &qp[i]->rx_vq;
2977                 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2978                                     &rx_attr);
2979                 dpaa2_q->fqid = rx_attr.fqid;
2980                 DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2981         }
2982         for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2983                 dpaa2_q = &qp[i]->tx_vq;
2984                 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2985                                     &tx_attr);
2986                 dpaa2_q->fqid = tx_attr.fqid;
2987                 DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2988         }
2989
2990         return 0;
2991 get_attr_failure:
2992         dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2993         return -1;
2994 }
2995
2996 static void
2997 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2998 {
2999         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3000         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3001         int ret;
3002
3003         PMD_INIT_FUNC_TRACE();
3004
3005         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3006         if (ret) {
3007                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3008                              priv->hw_id);
3009                 return;
3010         }
3011
3012         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3013         if (ret < 0) {
3014                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3015                 return;
3016         }
3017 }
3018
3019 static int
3020 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3021 {
3022         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3023         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3024         int ret;
3025
3026         PMD_INIT_FUNC_TRACE();
3027
3028         /* Function is reverse of dpaa2_sec_dev_init.
3029          * It does the following:
3030          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3031          * 2. Close the DPSECI device
3032          * 3. Free the allocated resources.
3033          */
3034
3035         /*Close the device at underlying layer*/
3036         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3037         if (ret) {
3038                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3039                 return -1;
3040         }
3041
3042         /*Free the allocated memory for ethernet private data and dpseci*/
3043         priv->hw = NULL;
3044         rte_free(dpseci);
3045
3046         return 0;
3047 }
3048
3049 static void
3050 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3051                         struct rte_cryptodev_info *info)
3052 {
3053         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3054
3055         PMD_INIT_FUNC_TRACE();
3056         if (info != NULL) {
3057                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3058                 info->feature_flags = dev->feature_flags;
3059                 info->capabilities = dpaa2_sec_capabilities;
3060                 /* No limit of number of sessions */
3061                 info->sym.max_nb_sessions = 0;
3062                 info->driver_id = cryptodev_driver_id;
3063         }
3064 }
3065
3066 static
3067 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3068                          struct rte_cryptodev_stats *stats)
3069 {
3070         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3071         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3072         struct dpseci_sec_counters counters = {0};
3073         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3074                                         dev->data->queue_pairs;
3075         int ret, i;
3076
3077         PMD_INIT_FUNC_TRACE();
3078         if (stats == NULL) {
3079                 DPAA2_SEC_ERR("Invalid stats ptr NULL");
3080                 return;
3081         }
3082         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3083                 if (qp[i] == NULL) {
3084                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3085                         continue;
3086                 }
3087
3088                 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3089                 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3090                 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3091                 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3092         }
3093
3094         ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
3095                                       &counters);
3096         if (ret) {
3097                 DPAA2_SEC_ERR("SEC counters failed");
3098         } else {
3099                 DPAA2_SEC_INFO("dpseci hardware stats:"
3100                             "\n\tNum of Requests Dequeued = %" PRIu64
3101                             "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3102                             "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3103                             "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3104                             "\n\tNum of Outbound Bytes Protected = %" PRIu64
3105                             "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3106                             "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3107                             counters.dequeued_requests,
3108                             counters.ob_enc_requests,
3109                             counters.ib_dec_requests,
3110                             counters.ob_enc_bytes,
3111                             counters.ob_prot_bytes,
3112                             counters.ib_dec_bytes,
3113                             counters.ib_valid_bytes);
3114         }
3115 }
3116
3117 static
3118 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3119 {
3120         int i;
3121         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3122                                    (dev->data->queue_pairs);
3123
3124         PMD_INIT_FUNC_TRACE();
3125
3126         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3127                 if (qp[i] == NULL) {
3128                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3129                         continue;
3130                 }
3131                 qp[i]->tx_vq.rx_pkts = 0;
3132                 qp[i]->tx_vq.tx_pkts = 0;
3133                 qp[i]->tx_vq.err_pkts = 0;
3134                 qp[i]->rx_vq.rx_pkts = 0;
3135                 qp[i]->rx_vq.tx_pkts = 0;
3136                 qp[i]->rx_vq.err_pkts = 0;
3137         }
3138 }
3139
3140 static void __attribute__((hot))
3141 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3142                                  const struct qbman_fd *fd,
3143                                  const struct qbman_result *dq,
3144                                  struct dpaa2_queue *rxq,
3145                                  struct rte_event *ev)
3146 {
3147         /* Prefetching mbuf */
3148         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3149                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3150
3151         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3152         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3153
3154         ev->flow_id = rxq->ev.flow_id;
3155         ev->sub_event_type = rxq->ev.sub_event_type;
3156         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3157         ev->op = RTE_EVENT_OP_NEW;
3158         ev->sched_type = rxq->ev.sched_type;
3159         ev->queue_id = rxq->ev.queue_id;
3160         ev->priority = rxq->ev.priority;
3161         ev->event_ptr = sec_fd_to_mbuf(fd);
3162
3163         qbman_swp_dqrr_consume(swp, dq);
3164 }
3165 static void
3166 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3167                                  const struct qbman_fd *fd,
3168                                  const struct qbman_result *dq,
3169                                  struct dpaa2_queue *rxq,
3170                                  struct rte_event *ev)
3171 {
3172         uint8_t dqrr_index;
3173         struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3174         /* Prefetching mbuf */
3175         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3176                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3177
3178         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3179         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3180
3181         ev->flow_id = rxq->ev.flow_id;
3182         ev->sub_event_type = rxq->ev.sub_event_type;
3183         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3184         ev->op = RTE_EVENT_OP_NEW;
3185         ev->sched_type = rxq->ev.sched_type;
3186         ev->queue_id = rxq->ev.queue_id;
3187         ev->priority = rxq->ev.priority;
3188
3189         ev->event_ptr = sec_fd_to_mbuf(fd);
3190         dqrr_index = qbman_get_dqrr_idx(dq);
3191         crypto_op->sym->m_src->seqn = dqrr_index + 1;
3192         DPAA2_PER_LCORE_DQRR_SIZE++;
3193         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3194         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3195 }
3196
3197 int
3198 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3199                 int qp_id,
3200                 uint16_t dpcon_id,
3201                 const struct rte_event *event)
3202 {
3203         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3204         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3205         struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3206         struct dpseci_rx_queue_cfg cfg;
3207         int ret;
3208
3209         if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3210                 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3211         else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3212                 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3213         else
3214                 return -EINVAL;
3215
3216         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3217         cfg.options = DPSECI_QUEUE_OPT_DEST;
3218         cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3219         cfg.dest_cfg.dest_id = dpcon_id;
3220         cfg.dest_cfg.priority = event->priority;
3221
3222         cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3223         cfg.user_ctx = (size_t)(qp);
3224         if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3225                 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3226                 cfg.order_preservation_en = 1;
3227         }
3228         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3229                                   qp_id, &cfg);
3230         if (ret) {
3231                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3232                 return ret;
3233         }
3234
3235         memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3236
3237         return 0;
3238 }
3239
3240 int
3241 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3242                         int qp_id)
3243 {
3244         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3245         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3246         struct dpseci_rx_queue_cfg cfg;
3247         int ret;
3248
3249         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3250         cfg.options = DPSECI_QUEUE_OPT_DEST;
3251         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3252
3253         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3254                                   qp_id, &cfg);
3255         if (ret)
3256                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3257
3258         return ret;
3259 }
3260
/* Cryptodev operations table registered with the framework from
 * dpaa2_sec_dev_init(). Queue-pair and session callbacks are defined
 * earlier in this file.
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
3276
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	/* Static capability table; identical for every device instance. */
	return dpaa2_sec_security_cap;
}
3282
/* rte_security operations; update/stats/metadata hooks are not
 * implemented by this PMD and are left NULL.
 */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
3291
3292 static int
3293 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3294 {
3295         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3296
3297         rte_free(dev->security_ctx);
3298
3299         rte_mempool_free(internals->fle_pool);
3300
3301         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3302                        dev->data->name, rte_socket_id());
3303
3304         return 0;
3305 }
3306
3307 static int
3308 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3309 {
3310         struct dpaa2_sec_dev_private *internals;
3311         struct rte_device *dev = cryptodev->device;
3312         struct rte_dpaa2_device *dpaa2_dev;
3313         struct rte_security_ctx *security_instance;
3314         struct fsl_mc_io *dpseci;
3315         uint16_t token;
3316         struct dpseci_attr attr;
3317         int retcode, hw_id;
3318         char str[30];
3319
3320         PMD_INIT_FUNC_TRACE();
3321         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3322         if (dpaa2_dev == NULL) {
3323                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3324                 return -1;
3325         }
3326         hw_id = dpaa2_dev->object_id;
3327
3328         cryptodev->driver_id = cryptodev_driver_id;
3329         cryptodev->dev_ops = &crypto_ops;
3330
3331         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3332         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3333         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3334                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3335                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3336                         RTE_CRYPTODEV_FF_SECURITY |
3337                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3338                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3339                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3340                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3341                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3342
3343         internals = cryptodev->data->dev_private;
3344
3345         /*
3346          * For secondary processes, we don't initialise any further as primary
3347          * has already done this work. Only check we don't need a different
3348          * RX function
3349          */
3350         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3351                 DPAA2_SEC_DEBUG("Device already init by primary process");
3352                 return 0;
3353         }
3354
3355         /* Initialize security_ctx only for primary process*/
3356         security_instance = rte_malloc("rte_security_instances_ops",
3357                                 sizeof(struct rte_security_ctx), 0);
3358         if (security_instance == NULL)
3359                 return -ENOMEM;
3360         security_instance->device = (void *)cryptodev;
3361         security_instance->ops = &dpaa2_sec_security_ops;
3362         security_instance->sess_cnt = 0;
3363         cryptodev->security_ctx = security_instance;
3364
3365         /*Open the rte device via MC and save the handle for further use*/
3366         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3367                                 sizeof(struct fsl_mc_io), 0);
3368         if (!dpseci) {
3369                 DPAA2_SEC_ERR(
3370                         "Error in allocating the memory for dpsec object");
3371                 return -1;
3372         }
3373         dpseci->regs = rte_mcp_ptr_list[0];
3374
3375         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3376         if (retcode != 0) {
3377                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3378                               retcode);
3379                 goto init_error;
3380         }
3381         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3382         if (retcode != 0) {
3383                 DPAA2_SEC_ERR(
3384                              "Cannot get dpsec device attributed: Error = %x",
3385                              retcode);
3386                 goto init_error;
3387         }
3388         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3389                         "dpsec-%u", hw_id);
3390
3391         internals->max_nb_queue_pairs = attr.num_tx_queues;
3392         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3393         internals->hw = dpseci;
3394         internals->token = token;
3395
3396         snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3397                         getpid(), cryptodev->data->dev_id);
3398         internals->fle_pool = rte_mempool_create((const char *)str,
3399                         FLE_POOL_NUM_BUFS,
3400                         FLE_POOL_BUF_SIZE,
3401                         FLE_POOL_CACHE_SIZE, 0,
3402                         NULL, NULL, NULL, NULL,
3403                         SOCKET_ID_ANY, 0);
3404         if (!internals->fle_pool) {
3405                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3406                 goto init_error;
3407         }
3408
3409         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3410         return 0;
3411
3412 init_error:
3413         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3414
3415         /* dpaa2_sec_uninit(crypto_dev_name); */
3416         return -EFAULT;
3417 }
3418
3419 static int
3420 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3421                           struct rte_dpaa2_device *dpaa2_dev)
3422 {
3423         struct rte_cryptodev *cryptodev;
3424         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3425
3426         int retval;
3427
3428         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3429                         dpaa2_dev->object_id);
3430
3431         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3432         if (cryptodev == NULL)
3433                 return -ENOMEM;
3434
3435         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3436                 cryptodev->data->dev_private = rte_zmalloc_socket(
3437                                         "cryptodev private structure",
3438                                         sizeof(struct dpaa2_sec_dev_private),
3439                                         RTE_CACHE_LINE_SIZE,
3440                                         rte_socket_id());
3441
3442                 if (cryptodev->data->dev_private == NULL)
3443                         rte_panic("Cannot allocate memzone for private "
3444                                   "device data");
3445         }
3446
3447         dpaa2_dev->cryptodev = cryptodev;
3448         cryptodev->device = &dpaa2_dev->device;
3449
3450         /* init user callbacks */
3451         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3452
3453         /* Invoke PMD device initialization function */
3454         retval = dpaa2_sec_dev_init(cryptodev);
3455         if (retval == 0)
3456                 return 0;
3457
3458         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3459                 rte_free(cryptodev->data->dev_private);
3460
3461         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3462
3463         return -ENXIO;
3464 }
3465
3466 static int
3467 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3468 {
3469         struct rte_cryptodev *cryptodev;
3470         int ret;
3471
3472         cryptodev = dpaa2_dev->cryptodev;
3473         if (cryptodev == NULL)
3474                 return -ENODEV;
3475
3476         ret = dpaa2_sec_uninit(cryptodev);
3477         if (ret)
3478                 return ret;
3479
3480         return rte_cryptodev_pmd_destroy(cryptodev);
3481 }
3482
/* DPAA2 bus driver descriptor; IOVA_AS_VA means frame descriptors can
 * carry virtual addresses directly.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the DPAA2 bus and obtain a cryptodev driver id. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3498
RTE_INIT(dpaa2_sec_init_log)
{
	/* Register the PMD log type; default level is NOTICE and can be
	 * raised at runtime via the EAL --log-level option.
	 */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}