crypto/dpaax_sec: support PDCP U-Plane with integrity
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2018 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <rte_cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31
32 #include "dpaa2_sec_priv.h"
33 #include "dpaa2_sec_event.h"
34 #include "dpaa2_sec_logs.h"
35
36 /* Required types */
37 typedef uint64_t        dma_addr_t;
38
39 /* RTA header files */
40 #include <hw/desc/ipsec.h>
41 #include <hw/desc/pdcp.h>
42 #include <hw/desc/algo.h>
43
44 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
45  * a pointer to the shared descriptor
46  */
47 #define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52
53 #define NO_PREFETCH 0
54 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
55 #define FLE_POOL_NUM_BUFS       32000
56 #define FLE_POOL_BUF_SIZE       256
57 #define FLE_POOL_CACHE_SIZE     512
58 #define FLE_SG_MEM_SIZE         2048
59 #define SEC_FLC_DHR_OUTBOUND    -114
60 #define SEC_FLC_DHR_INBOUND     0
61
62 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
63
64 static uint8_t cryptodev_driver_id;
65
66 int dpaa2_logtype_sec;
67
/*
 * Build a compound (frame-list) FD for a protocol-offload (lookaside) op
 * where input and output may be distinct mbufs.  Layout: one FLE triplet is
 * taken from the FLE pool; entry 0 stashes the crypto-op pointer (and the
 * session ctxt via DPAA2_FLE_SAVE_CTXT) for recovery on dequeue, entry 1 is
 * the output FLE, entry 2 the input FLE.  Returns 0 on success, -1 if the
 * FLE pool is exhausted.
 * NOTE(review): input/output FLEs address only the first mbuf segment
 * (data_off/pkt_len/buf_len on the head) — presumably callers guarantee
 * non-SG mbufs here; confirm against the enqueue path.
 */
68 static inline int
69 build_proto_compound_fd(dpaa2_sec_session *sess,
70                struct rte_crypto_op *op,
71                struct qbman_fd *fd, uint16_t bpid)
72 {
73         struct rte_crypto_sym_op *sym_op = op->sym;
74         struct ctxt_priv *priv = sess->ctxt;
75         struct qbman_fle *fle, *ip_fle, *op_fle;
76         struct sec_flow_context *flc;
77         struct rte_mbuf *src_mbuf = sym_op->m_src;
78         struct rte_mbuf *dst_mbuf = sym_op->m_dst;
79         int retval;
80
            /* In-place operation when no distinct destination is supplied */
81         if (!dst_mbuf)
82                 dst_mbuf = src_mbuf;
83
84         /* Save the shared descriptor */
85         flc = &priv->flc_desc[0].flc;
86
87         /* we are using the first FLE entry to store Mbuf */
88         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
89         if (retval) {
90                 DPAA2_SEC_ERR("Memory alloc failed");
91                 return -1;
92         }
93         memset(fle, 0, FLE_POOL_BUF_SIZE);
94         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96
            /* Output FLE is entry 1, input FLE entry 2 of the triplet */
97         op_fle = fle + 1;
98         ip_fle = fle + 2;
99
            /* Valid bpid: hardware frees buffers back to the pool; otherwise
             * mark FD/FLEs invalid-pool so software retains ownership.
             */
100         if (likely(bpid < MAX_BPID)) {
101                 DPAA2_SET_FD_BPID(fd, bpid);
102                 DPAA2_SET_FLE_BPID(op_fle, bpid);
103                 DPAA2_SET_FLE_BPID(ip_fle, bpid);
104         } else {
105                 DPAA2_SET_FD_IVP(fd);
106                 DPAA2_SET_FLE_IVP(op_fle);
107                 DPAA2_SET_FLE_IVP(ip_fle);
108         }
109
110         /* Configure FD as a FRAME LIST */
111         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
112         DPAA2_SET_FD_COMPOUND_FMT(fd);
113         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
114
115         /* Configure Output FLE with dst mbuf data  */
116         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
117         DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
            /* Output length is the full buffer: SEC writes the (possibly
             * larger) protocol-processed frame here.
             */
118         DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
119
120         /* Configure Input FLE with src mbuf data */
121         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
122         DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
123         DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
124
125         DPAA2_SET_FD_LEN(fd, ip_fle->length);
126         DPAA2_SET_FLE_FIN(ip_fle);
127
128 #ifdef ENABLE_HFN_OVERRIDE
129         if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
130                 /* enable HFN override */
131                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
132                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
133                 DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
134         }
135 #endif
136
137         return 0;
138
139 }
140
/*
 * Build a simple (single-buffer, in-place) FD for a protocol-offload op.
 * If the op carries a distinct destination mbuf, delegate to the compound
 * FD builder instead.  Returns 0 on success (or the compound builder's
 * return value).
 */
141 static inline int
142 build_proto_fd(dpaa2_sec_session *sess,
143                struct rte_crypto_op *op,
144                struct qbman_fd *fd, uint16_t bpid)
145 {
146         struct rte_crypto_sym_op *sym_op = op->sym;
            /* Out-of-place ops need the two-FLE compound format */
147         if (sym_op->m_dst)
148                 return build_proto_compound_fd(sess, op, fd, bpid);
149
150         struct ctxt_priv *priv = sess->ctxt;
151         struct sec_flow_context *flc;
152         struct rte_mbuf *mbuf = sym_op->m_src;
153
154         if (likely(bpid < MAX_BPID))
155                 DPAA2_SET_FD_BPID(fd, bpid);
156         else
157                 DPAA2_SET_FD_IVP(fd);
158
159         /* Save the shared descriptor */
160         flc = &priv->flc_desc[0].flc;
161
162         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
163         DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
164         DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
165         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
166
167         /* save physical address of mbuf */
            /* Trick: the op pointer is stashed in mbuf->buf_iova so the
             * dequeue path can recover the rte_crypto_op from the FD; the
             * real buf_iova is parked in aead.digest.phys_addr meanwhile.
             * NOTE(review): presumably restored on dequeue — not visible in
             * this chunk; verify against the dequeue handler.
             */
168         op->sym->aead.digest.phys_addr = mbuf->buf_iova;
169         mbuf->buf_iova = (size_t)op;
170
171         return 0;
172 }
173
/*
 * Build a compound FD with scatter/gather FLEs for an AEAD (GCM) op over
 * multi-segment mbufs.  A single rte_malloc'd region of FLE_SG_MEM_SIZE
 * holds: [0] op/ctxt stash FLE, [1] output FLE, [2] input FLE, [3..] the
 * SGE array for both output and input chains (plus scratch for the old ICV
 * on decrypt).  Returns 0 on success, -1 on allocation failure.
 * NOTE(review): FLE_SG_MEM_SIZE bounds the SGE count implicitly — a frame
 * with very many segments could overflow this region; confirm the max
 * segment count enforced by callers.
 */
174 static inline int
175 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
176                  struct rte_crypto_op *op,
177                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
178 {
179         struct rte_crypto_sym_op *sym_op = op->sym;
180         struct ctxt_priv *priv = sess->ctxt;
181         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
182         struct sec_flow_context *flc;
183         uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
184         int icv_len = sess->digest_length;
185         uint8_t *old_icv;
186         struct rte_mbuf *mbuf;
187         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
188                         sess->iv.offset);
189
190         PMD_INIT_FUNC_TRACE();
191
            /* Output goes to m_dst when provided, else in-place on m_src */
192         if (sym_op->m_dst)
193                 mbuf = sym_op->m_dst;
194         else
195                 mbuf = sym_op->m_src;
196
197         /* first FLE entry used to store mbuf and session ctxt */
198         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
199                         RTE_CACHE_LINE_SIZE);
200         if (unlikely(!fle)) {
201                 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
202                 return -1;
203         }
204         memset(fle, 0, FLE_SG_MEM_SIZE);
205         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
206         DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
207
208         op_fle = fle + 1;
209         ip_fle = fle + 2;
210         sge = fle + 3;
211
212         /* Save the shared descriptor */
213         flc = &priv->flc_desc[0].flc;
214
215         /* Configure FD as a FRAME LIST */
216         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
217         DPAA2_SET_FD_COMPOUND_FMT(fd);
218         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
219
220         DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
221                    "iv-len=%d data_off: 0x%x\n",
222                    sym_op->aead.data.offset,
223                    sym_op->aead.data.length,
224                    sess->digest_length,
225                    sess->iv.length,
226                    sym_op->m_src->data_off);
227
228         /* Configure Output FLE with Scatter/Gather Entry */
229         DPAA2_SET_FLE_SG_EXT(op_fle);
230         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
231
232         if (auth_only_len)
233                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
234
            /* Encrypt output additionally carries the generated ICV */
235         op_fle->length = (sess->dir == DIR_ENC) ?
236                         (sym_op->aead.data.length + icv_len + auth_only_len) :
237                         sym_op->aead.data.length + auth_only_len;
238
239         /* Configure Output SGE for Encap/Decap */
240         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
            /* Offset keeps room so SEC's 16-byte-aligned AAD write lands
             * ahead of the ciphertext start.
             */
241         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
242                         RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
243         sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
244
245         mbuf = mbuf->next;
246         /* o/p segs */
247         while (mbuf) {
248                 sge++;
249                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
250                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
251                 sge->length = mbuf->data_len;
252                 mbuf = mbuf->next;
253         }
            /* Last data SGE is trimmed; on encrypt the ICV gets its own SGE
             * (sym_op->aead.digest.data) appended below.
             */
254         sge->length -= icv_len;
255
256         if (sess->dir == DIR_ENC) {
257                 sge++;
258                 DPAA2_SET_FLE_ADDR(sge,
259                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
260                 sge->length = icv_len;
261         }
262         DPAA2_SET_FLE_FIN(sge);
263
            /* Input SGE chain starts right after the output chain */
264         sge++;
265         mbuf = sym_op->m_src;
266
267         /* Configure Input FLE with Scatter/Gather Entry */
268         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
269         DPAA2_SET_FLE_SG_EXT(ip_fle);
270         DPAA2_SET_FLE_FIN(ip_fle);
            /* Decrypt input additionally carries the received ICV to verify */
271         ip_fle->length = (sess->dir == DIR_ENC) ?
272                 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
273                 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
274                  icv_len);
275
276         /* Configure Input SGE for Encap/Decap */
            /* Input order expected by the descriptor: IV, AAD, payload[, ICV] */
277         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
278         sge->length = sess->iv.length;
279
280         sge++;
281         if (auth_only_len) {
282                 DPAA2_SET_FLE_ADDR(sge,
283                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
284                 sge->length = auth_only_len;
285                 sge++;
286         }
287
288         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
289         DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
290                                 mbuf->data_off);
291         sge->length = mbuf->data_len - sym_op->aead.data.offset;
292
293         mbuf = mbuf->next;
294         /* i/p segs */
295         while (mbuf) {
296                 sge++;
297                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
298                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
299                 sge->length = mbuf->data_len;
300                 mbuf = mbuf->next;
301         }
302
303         if (sess->dir == DIR_DEC) {
304                 sge++;
                    /* Copy the received ICV into scratch space just past the
                     * final SGE so SEC reads a stable copy for verification.
                     */
305                 old_icv = (uint8_t *)(sge + 1);
306                 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
307                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
308                 sge->length = icv_len;
309         }
310
311         DPAA2_SET_FLE_FIN(sge);
312         if (auth_only_len) {
313                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
314                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
315         }
316         DPAA2_SET_FD_LEN(fd, ip_fle->length);
317
318         return 0;
319 }
320
/*
 * Build a compound FD for an AEAD (GCM) op over single-segment mbufs.
 * Uses a fixed FLE triplet from the FLE pool plus a small SGE array:
 * output chain is {data[, new ICV]}, input chain is {IV[, AAD], data
 * [, old ICV]}.  Returns 0 on success, -1 if the FLE pool is exhausted.
 */
321 static inline int
322 build_authenc_gcm_fd(dpaa2_sec_session *sess,
323                      struct rte_crypto_op *op,
324                      struct qbman_fd *fd, uint16_t bpid)
325 {
326         struct rte_crypto_sym_op *sym_op = op->sym;
327         struct ctxt_priv *priv = sess->ctxt;
328         struct qbman_fle *fle, *sge;
329         struct sec_flow_context *flc;
330         uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
331         int icv_len = sess->digest_length, retval;
332         uint8_t *old_icv;
333         struct rte_mbuf *dst;
334         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
335                         sess->iv.offset);
336
337         PMD_INIT_FUNC_TRACE();
338
            /* Output goes to m_dst when provided, else in-place on m_src */
339         if (sym_op->m_dst)
340                 dst = sym_op->m_dst;
341         else
342                 dst = sym_op->m_src;
343
344         /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
345          * Currently we do not know which FLE has the mbuf stored.
346          * So while retrieving we can go back 1 FLE from the FD -ADDR
347          * to get the MBUF Addr from the previous FLE.
348          * We can have a better approach to use the inline Mbuf
349          */
350         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
351         if (retval) {
352                 DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
353                 return -1;
354         }
355         memset(fle, 0, FLE_POOL_BUF_SIZE);
356         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
357         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
            /* fle now points at the output FLE; sge at the first of the
             * (up to four) SGEs that follow the input FLE.
             */
358         fle = fle + 1;
359         sge = fle + 2;
360         if (likely(bpid < MAX_BPID)) {
361                 DPAA2_SET_FD_BPID(fd, bpid);
362                 DPAA2_SET_FLE_BPID(fle, bpid);
363                 DPAA2_SET_FLE_BPID(fle + 1, bpid);
364                 DPAA2_SET_FLE_BPID(sge, bpid);
365                 DPAA2_SET_FLE_BPID(sge + 1, bpid);
366                 DPAA2_SET_FLE_BPID(sge + 2, bpid);
367                 DPAA2_SET_FLE_BPID(sge + 3, bpid);
368         } else {
369                 DPAA2_SET_FD_IVP(fd);
370                 DPAA2_SET_FLE_IVP(fle);
371                 DPAA2_SET_FLE_IVP((fle + 1));
372                 DPAA2_SET_FLE_IVP(sge);
373                 DPAA2_SET_FLE_IVP((sge + 1));
374                 DPAA2_SET_FLE_IVP((sge + 2));
375                 DPAA2_SET_FLE_IVP((sge + 3));
376         }
377
378         /* Save the shared descriptor */
379         flc = &priv->flc_desc[0].flc;
380         /* Configure FD as a FRAME LIST */
381         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
382         DPAA2_SET_FD_COMPOUND_FMT(fd);
383         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
384
385         DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
386                    "iv-len=%d data_off: 0x%x\n",
387                    sym_op->aead.data.offset,
388                    sym_op->aead.data.length,
389                    sess->digest_length,
390                    sess->iv.length,
391                    sym_op->m_src->data_off);
392
393         /* Configure Output FLE with Scatter/Gather Entry */
394         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
395         if (auth_only_len)
396                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
            /* Encrypt output additionally carries the generated ICV */
397         fle->length = (sess->dir == DIR_ENC) ?
398                         (sym_op->aead.data.length + icv_len + auth_only_len) :
399                         sym_op->aead.data.length + auth_only_len;
400
401         DPAA2_SET_FLE_SG_EXT(fle);
402
403         /* Configure Output SGE for Encap/Decap */
404         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
            /* Offset keeps room so SEC's 16-byte-aligned AAD write lands
             * ahead of the ciphertext start.
             */
405         DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
406                         RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
407         sge->length = sym_op->aead.data.length + auth_only_len;
408
409         if (sess->dir == DIR_ENC) {
410                 sge++;
411                 DPAA2_SET_FLE_ADDR(sge,
412                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
413                 sge->length = sess->digest_length;
414                 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
415                                         sess->iv.length + auth_only_len));
416         }
417         DPAA2_SET_FLE_FIN(sge);
418
            /* Move on to the input FLE and its first input SGE */
419         sge++;
420         fle++;
421
422         /* Configure Input FLE with Scatter/Gather Entry */
423         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
424         DPAA2_SET_FLE_SG_EXT(fle);
425         DPAA2_SET_FLE_FIN(fle);
            /* Decrypt input additionally carries the received ICV to verify */
426         fle->length = (sess->dir == DIR_ENC) ?
427                 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
428                 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
429                  sess->digest_length);
430
431         /* Configure Input SGE for Encap/Decap */
            /* Input order expected by the descriptor: IV, AAD, payload[, ICV] */
432         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
433         sge->length = sess->iv.length;
434         sge++;
435         if (auth_only_len) {
436                 DPAA2_SET_FLE_ADDR(sge,
437                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
438                 sge->length = auth_only_len;
439                 DPAA2_SET_FLE_BPID(sge, bpid);
440                 sge++;
441         }
442
443         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
444         DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
445                                 sym_op->m_src->data_off);
446         sge->length = sym_op->aead.data.length;
447         if (sess->dir == DIR_DEC) {
448                 sge++;
                    /* Stable copy of the received ICV just past the last SGE */
449                 old_icv = (uint8_t *)(sge + 1);
450                 memcpy(old_icv, sym_op->aead.digest.data,
451                        sess->digest_length);
452                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
453                 sge->length = sess->digest_length;
454                 DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
455                                  sess->digest_length +
456                                  sess->iv.length +
457                                  auth_only_len));
458         }
459         DPAA2_SET_FLE_FIN(sge);
460
461         if (auth_only_len) {
462                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
463                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
464         }
465
466         return 0;
467 }
468
/*
 * Build a compound FD with scatter/gather FLEs for a chained
 * cipher+auth op over multi-segment mbufs.  Same memory layout as
 * build_authenc_gcm_sg_fd: one rte_malloc'd region holds the stash FLE,
 * output FLE, input FLE and the SGE arrays (plus old-ICV scratch on
 * decrypt).  auth_only_len is the auth-only prefix (auth range minus
 * cipher range).  Returns 0 on success, -1 on allocation failure.
 */
469 static inline int
470 build_authenc_sg_fd(dpaa2_sec_session *sess,
471                  struct rte_crypto_op *op,
472                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
473 {
474         struct rte_crypto_sym_op *sym_op = op->sym;
475         struct ctxt_priv *priv = sess->ctxt;
476         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
477         struct sec_flow_context *flc;
            /* NOTE(review): assumes auth range fully covers cipher range;
             * underflows if cipher.data.length > auth.data.length — confirm
             * capability checks enforce this.
             */
478         uint32_t auth_only_len = sym_op->auth.data.length -
479                                 sym_op->cipher.data.length;
480         int icv_len = sess->digest_length;
481         uint8_t *old_icv;
482         struct rte_mbuf *mbuf;
483         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
484                         sess->iv.offset);
485
486         PMD_INIT_FUNC_TRACE();
487
            /* Output goes to m_dst when provided, else in-place on m_src */
488         if (sym_op->m_dst)
489                 mbuf = sym_op->m_dst;
490         else
491                 mbuf = sym_op->m_src;
492
493         /* first FLE entry used to store mbuf and session ctxt */
494         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
495                         RTE_CACHE_LINE_SIZE);
496         if (unlikely(!fle)) {
497                 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
498                 return -1;
499         }
500         memset(fle, 0, FLE_SG_MEM_SIZE);
501         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
502         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
503
504         op_fle = fle + 1;
505         ip_fle = fle + 2;
506         sge = fle + 3;
507
508         /* Save the shared descriptor */
509         flc = &priv->flc_desc[0].flc;
510
511         /* Configure FD as a FRAME LIST */
512         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
513         DPAA2_SET_FD_COMPOUND_FMT(fd);
514         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
515
516         DPAA2_SEC_DP_DEBUG(
517                 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
518                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
519                 sym_op->auth.data.offset,
520                 sym_op->auth.data.length,
521                 sess->digest_length,
522                 sym_op->cipher.data.offset,
523                 sym_op->cipher.data.length,
524                 sess->iv.length,
525                 sym_op->m_src->data_off);
526
527         /* Configure Output FLE with Scatter/Gather Entry */
528         DPAA2_SET_FLE_SG_EXT(op_fle);
529         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
530
531         if (auth_only_len)
532                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
533
            /* Encrypt output additionally carries the generated ICV */
534         op_fle->length = (sess->dir == DIR_ENC) ?
535                         (sym_op->cipher.data.length + icv_len) :
536                         sym_op->cipher.data.length;
537
538         /* Configure Output SGE for Encap/Decap */
539         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
540         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
541         sge->length = mbuf->data_len - sym_op->auth.data.offset;
542
543         mbuf = mbuf->next;
544         /* o/p segs */
545         while (mbuf) {
546                 sge++;
547                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
548                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
549                 sge->length = mbuf->data_len;
550                 mbuf = mbuf->next;
551         }
            /* Last data SGE trimmed; on encrypt the ICV gets its own SGE */
552         sge->length -= icv_len;
553
554         if (sess->dir == DIR_ENC) {
555                 sge++;
556                 DPAA2_SET_FLE_ADDR(sge,
557                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
558                 sge->length = icv_len;
559         }
560         DPAA2_SET_FLE_FIN(sge);
561
            /* Input SGE chain starts right after the output chain */
562         sge++;
563         mbuf = sym_op->m_src;
564
565         /* Configure Input FLE with Scatter/Gather Entry */
566         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
567         DPAA2_SET_FLE_SG_EXT(ip_fle);
568         DPAA2_SET_FLE_FIN(ip_fle);
            /* Decrypt input additionally carries the received ICV to verify */
569         ip_fle->length = (sess->dir == DIR_ENC) ?
570                         (sym_op->auth.data.length + sess->iv.length) :
571                         (sym_op->auth.data.length + sess->iv.length +
572                          icv_len);
573
574         /* Configure Input SGE for Encap/Decap */
            /* Input order expected by the descriptor: IV, payload[, ICV] */
575         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
576         sge->length = sess->iv.length;
577
578         sge++;
579         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
580         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
581                                 mbuf->data_off);
582         sge->length = mbuf->data_len - sym_op->auth.data.offset;
583
584         mbuf = mbuf->next;
585         /* i/p segs */
586         while (mbuf) {
587                 sge++;
588                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
589                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
590                 sge->length = mbuf->data_len;
591                 mbuf = mbuf->next;
592         }
593         sge->length -= icv_len;
594
595         if (sess->dir == DIR_DEC) {
596                 sge++;
                    /* Stable copy of the received ICV just past the last SGE */
597                 old_icv = (uint8_t *)(sge + 1);
598                 memcpy(old_icv, sym_op->auth.digest.data,
599                        icv_len);
600                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
601                 sge->length = icv_len;
602         }
603
604         DPAA2_SET_FLE_FIN(sge);
605         if (auth_only_len) {
606                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
607                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
608         }
609         DPAA2_SET_FD_LEN(fd, ip_fle->length);
610
611         return 0;
612 }
613
/*
 * Build a compound FD for a chained cipher+auth op over single-segment
 * mbufs.  Uses a fixed FLE triplet from the FLE pool plus a small SGE
 * array: output chain {ciphertext[, new ICV]}, input chain {IV, auth
 * range[, old ICV]}.  Returns 0 on success, -1 if the FLE pool is
 * exhausted.
 */
614 static inline int
615 build_authenc_fd(dpaa2_sec_session *sess,
616                  struct rte_crypto_op *op,
617                  struct qbman_fd *fd, uint16_t bpid)
618 {
619         struct rte_crypto_sym_op *sym_op = op->sym;
620         struct ctxt_priv *priv = sess->ctxt;
621         struct qbman_fle *fle, *sge;
622         struct sec_flow_context *flc;
            /* NOTE(review): assumes auth range fully covers cipher range;
             * underflows otherwise — confirm capability checks enforce it.
             */
623         uint32_t auth_only_len = sym_op->auth.data.length -
624                                 sym_op->cipher.data.length;
625         int icv_len = sess->digest_length, retval;
626         uint8_t *old_icv;
627         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
628                         sess->iv.offset);
629         struct rte_mbuf *dst;
630
631         PMD_INIT_FUNC_TRACE();
632
            /* Output goes to m_dst when provided, else in-place on m_src */
633         if (sym_op->m_dst)
634                 dst = sym_op->m_dst;
635         else
636                 dst = sym_op->m_src;
637
638         /* we are using the first FLE entry to store Mbuf.
639          * Currently we do not know which FLE has the mbuf stored.
640          * So while retrieving we can go back 1 FLE from the FD -ADDR
641          * to get the MBUF Addr from the previous FLE.
642          * We can have a better approach to use the inline Mbuf
643          */
644         retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
645         if (retval) {
646                 DPAA2_SEC_ERR("Memory alloc failed for SGE");
647                 return -1;
648         }
649         memset(fle, 0, FLE_POOL_BUF_SIZE);
650         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
651         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
            /* fle now points at the output FLE; sge at the first of the
             * (up to four) SGEs that follow the input FLE.
             */
652         fle = fle + 1;
653         sge = fle + 2;
654         if (likely(bpid < MAX_BPID)) {
655                 DPAA2_SET_FD_BPID(fd, bpid);
656                 DPAA2_SET_FLE_BPID(fle, bpid);
657                 DPAA2_SET_FLE_BPID(fle + 1, bpid);
658                 DPAA2_SET_FLE_BPID(sge, bpid);
659                 DPAA2_SET_FLE_BPID(sge + 1, bpid);
660                 DPAA2_SET_FLE_BPID(sge + 2, bpid);
661                 DPAA2_SET_FLE_BPID(sge + 3, bpid);
662         } else {
663                 DPAA2_SET_FD_IVP(fd);
664                 DPAA2_SET_FLE_IVP(fle);
665                 DPAA2_SET_FLE_IVP((fle + 1));
666                 DPAA2_SET_FLE_IVP(sge);
667                 DPAA2_SET_FLE_IVP((sge + 1));
668                 DPAA2_SET_FLE_IVP((sge + 2));
669                 DPAA2_SET_FLE_IVP((sge + 3));
670         }
671
672         /* Save the shared descriptor */
673         flc = &priv->flc_desc[0].flc;
674         /* Configure FD as a FRAME LIST */
675         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
676         DPAA2_SET_FD_COMPOUND_FMT(fd);
677         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
678
679         DPAA2_SEC_DP_DEBUG(
680                 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
681                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
682                 sym_op->auth.data.offset,
683                 sym_op->auth.data.length,
684                 sess->digest_length,
685                 sym_op->cipher.data.offset,
686                 sym_op->cipher.data.length,
687                 sess->iv.length,
688                 sym_op->m_src->data_off);
689
690         /* Configure Output FLE with Scatter/Gather Entry */
691         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
692         if (auth_only_len)
693                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
            /* Encrypt output additionally carries the generated ICV */
694         fle->length = (sess->dir == DIR_ENC) ?
695                         (sym_op->cipher.data.length + icv_len) :
696                         sym_op->cipher.data.length;
697
698         DPAA2_SET_FLE_SG_EXT(fle);
699
700         /* Configure Output SGE for Encap/Decap */
701         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
702         DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
703                                 dst->data_off);
704         sge->length = sym_op->cipher.data.length;
705
706         if (sess->dir == DIR_ENC) {
707                 sge++;
708                 DPAA2_SET_FLE_ADDR(sge,
709                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
710                 sge->length = sess->digest_length;
711                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
712                                         sess->iv.length));
713         }
714         DPAA2_SET_FLE_FIN(sge);
715
            /* Move on to the input FLE and its first input SGE */
716         sge++;
717         fle++;
718
719         /* Configure Input FLE with Scatter/Gather Entry */
720         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
721         DPAA2_SET_FLE_SG_EXT(fle);
722         DPAA2_SET_FLE_FIN(fle);
            /* Decrypt input additionally carries the received ICV to verify */
723         fle->length = (sess->dir == DIR_ENC) ?
724                         (sym_op->auth.data.length + sess->iv.length) :
725                         (sym_op->auth.data.length + sess->iv.length +
726                          sess->digest_length);
727
728         /* Configure Input SGE for Encap/Decap */
            /* Input order expected by the descriptor: IV, auth range[, ICV] */
729         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
730         sge->length = sess->iv.length;
731         sge++;
732
733         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
734         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
735                                 sym_op->m_src->data_off);
736         sge->length = sym_op->auth.data.length;
737         if (sess->dir == DIR_DEC) {
738                 sge++;
                    /* Stable copy of the received ICV just past the last SGE */
739                 old_icv = (uint8_t *)(sge + 1);
740                 memcpy(old_icv, sym_op->auth.digest.data,
741                        sess->digest_length);
742                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
743                 sge->length = sess->digest_length;
744                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
745                                  sess->digest_length +
746                                  sess->iv.length));
747         }
748         DPAA2_SET_FLE_FIN(sge);
749         if (auth_only_len) {
750                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
751                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
752         }
753         return 0;
754 }
755
/*
 * Build a compound FD for an auth-only op over multi-segment mbufs.
 * Output FLE points directly at the digest buffer; input FLE is an SGE
 * chain over the auth range (plus a scratch copy of the received digest
 * when verifying).  DIR_ENC here means digest generation, DIR_DEC digest
 * verification.  Returns 0 on success, -1 on allocation failure.
 */
756 static inline int build_auth_sg_fd(
757                 dpaa2_sec_session *sess,
758                 struct rte_crypto_op *op,
759                 struct qbman_fd *fd,
760                 __rte_unused uint16_t bpid)
761 {
762         struct rte_crypto_sym_op *sym_op = op->sym;
763         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
764         struct sec_flow_context *flc;
765         struct ctxt_priv *priv = sess->ctxt;
766         uint8_t *old_digest;
767         struct rte_mbuf *mbuf;
768
769         PMD_INIT_FUNC_TRACE();
770
771         mbuf = sym_op->m_src;
772         fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
773                         RTE_CACHE_LINE_SIZE);
774         if (unlikely(!fle)) {
775                 DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
776                 return -1;
777         }
778         memset(fle, 0, FLE_SG_MEM_SIZE);
779         /* first FLE entry used to store mbuf and session ctxt */
780         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
781         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
782         op_fle = fle + 1;
783         ip_fle = fle + 2;
784         sge = fle + 3;
785
            /* Auth-only uses the INITFINAL descriptor, not flc_desc[0] */
786         flc = &priv->flc_desc[DESC_INITFINAL].flc;
787         /* sg FD */
788         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
789         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
790         DPAA2_SET_FD_COMPOUND_FMT(fd);
791
792         /* o/p fle */
            /* Output is just the digest buffer — no SG needed */
793         DPAA2_SET_FLE_ADDR(op_fle,
794                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
795         op_fle->length = sess->digest_length;
796
797         /* i/p fle */
798         DPAA2_SET_FLE_SG_EXT(ip_fle);
799         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
800         /* i/p 1st seg */
801         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
802         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
803         sge->length = mbuf->data_len - sym_op->auth.data.offset;
804
805         /* i/p segs */
806         mbuf = mbuf->next;
807         while (mbuf) {
808                 sge++;
809                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
810                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
811                 sge->length = mbuf->data_len;
812                 mbuf = mbuf->next;
813         }
814         if (sess->dir == DIR_ENC) {
815                 /* Digest calculation case */
                    /* NOTE(review): trims digest_length off the last segment —
                     * presumably the digest is appended in-buffer by callers;
                     * confirm against the generation path.
                     */
816                 sge->length -= sess->digest_length;
817                 ip_fle->length = sym_op->auth.data.length;
818         } else {
819                 /* Digest verification case */
820                 sge++;
                    /* Copy received digest into scratch space just past the
                     * final SGE so SEC reads a stable copy to compare.
                     */
821                 old_digest = (uint8_t *)(sge + 1);
822                 rte_memcpy(old_digest, sym_op->auth.digest.data,
823                            sess->digest_length);
824                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
825                 sge->length = sess->digest_length;
826                 ip_fle->length = sym_op->auth.data.length +
827                                 sess->digest_length;
828         }
829         DPAA2_SET_FLE_FIN(sge);
830         DPAA2_SET_FLE_FIN(ip_fle);
831         DPAA2_SET_FD_LEN(fd, ip_fle->length);
832
833         return 0;
834 }
835
/* Build a compound FD for an authentication-only (hash) operation on a
 * contiguous mbuf.
 *
 * Uses a fixed-size FLE block from the session's fle_pool: fle[0] stores
 * the op pointer and session ctxt, fle[1] is the output FLE (digest),
 * fle[2] is the input FLE (data, plus the old digest for verification).
 *
 * @return 0 on success, -1 if the FLE pool is exhausted
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	/* Mark buffer-pool ownership (or invalid-pool) on FD and FLEs */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* o/p fle: SEC writes the computed digest here */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Digest calculation: input is just the auth region */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification: input FLE becomes a 2-entry SG list,
		 * auth region followed by a copy of the caller's digest.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		/* old_digest is scratch space just past the SG entries in
		 * the FLE_POOL_BUF_SIZE block.
		 */
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
925
/* Build a compound FD for a cipher-only operation over segmented
 * (scatter-gather) mbufs.
 *
 * FLE layout: fle[0] stashes the op pointer and session ctxt, fle[1] is
 * the output FLE (SG list over m_dst, or m_src for in-place), fle[2] is
 * the input FLE (SG list: IV first, then the m_src data segments).
 *
 * @return 0 on success, -1 if SG table allocation fails
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	/* IV lives in the op's private area at the session-configured offset */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is set, otherwise in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* Heap allocation; freed by sec_fd_to_mbuf for non-contiguous mbufs */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle: SG list over the destination mbuf chain */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg: skip cipher.data.offset bytes of the first segment */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: input SG table starts right after the output one */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV: first input SG entry is the IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1044
/* Build a compound FD for a cipher-only operation on contiguous mbufs.
 *
 * Uses a fixed-size FLE block from the session's fle_pool: fle[0] stashes
 * the op pointer and session ctxt, fle[1] is the output FLE (dst buffer),
 * fle[2] is the input FLE, an SG list of IV followed by the src data.
 *
 * @return 0 on success, -1 if the FLE pool is exhausted
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	/* IV lives in the op's private area at the session-configured offset */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	/* Out-of-place if m_dst is set, otherwise in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Mark buffer-pool ownership (or invalid-pool) on FD and FLEs */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle: ciphertext/plaintext destination */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* i/p fle: 2-entry SG list, IV followed by the source data */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1152
1153 static inline int
1154 build_sec_fd(struct rte_crypto_op *op,
1155              struct qbman_fd *fd, uint16_t bpid)
1156 {
1157         int ret = -1;
1158         dpaa2_sec_session *sess;
1159
1160         PMD_INIT_FUNC_TRACE();
1161
1162         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1163                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1164                                 op->sym->session, cryptodev_driver_id);
1165         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1166                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1167                                 op->sym->sec_session);
1168         else
1169                 return -1;
1170
1171         /* Segmented buffer */
1172         if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
1173                 switch (sess->ctxt_type) {
1174                 case DPAA2_SEC_CIPHER:
1175                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1176                         break;
1177                 case DPAA2_SEC_AUTH:
1178                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1179                         break;
1180                 case DPAA2_SEC_AEAD:
1181                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1182                         break;
1183                 case DPAA2_SEC_CIPHER_HASH:
1184                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1185                         break;
1186                 case DPAA2_SEC_HASH_CIPHER:
1187                 default:
1188                         DPAA2_SEC_ERR("error: Unsupported session");
1189                 }
1190         } else {
1191                 switch (sess->ctxt_type) {
1192                 case DPAA2_SEC_CIPHER:
1193                         ret = build_cipher_fd(sess, op, fd, bpid);
1194                         break;
1195                 case DPAA2_SEC_AUTH:
1196                         ret = build_auth_fd(sess, op, fd, bpid);
1197                         break;
1198                 case DPAA2_SEC_AEAD:
1199                         ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1200                         break;
1201                 case DPAA2_SEC_CIPHER_HASH:
1202                         ret = build_authenc_fd(sess, op, fd, bpid);
1203                         break;
1204                 case DPAA2_SEC_IPSEC:
1205                         ret = build_proto_fd(sess, op, fd, bpid);
1206                         break;
1207                 case DPAA2_SEC_PDCP:
1208                         ret = build_proto_compound_fd(sess, op, fd, bpid);
1209                         break;
1210                 case DPAA2_SEC_HASH_CIPHER:
1211                 default:
1212                         DPAA2_SEC_ERR("error: Unsupported session");
1213                 }
1214         }
1215         return ret;
1216 }
1217
1218 static uint16_t
1219 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1220                         uint16_t nb_ops)
1221 {
1222         /* Function to transmit the frames to given device and VQ*/
1223         uint32_t loop;
1224         int32_t ret;
1225         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1226         uint32_t frames_to_send;
1227         struct qbman_eq_desc eqdesc;
1228         struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1229         struct qbman_swp *swp;
1230         uint16_t num_tx = 0;
1231         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1232         /*todo - need to support multiple buffer pools */
1233         uint16_t bpid;
1234         struct rte_mempool *mb_pool;
1235
1236         if (unlikely(nb_ops == 0))
1237                 return 0;
1238
1239         if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1240                 DPAA2_SEC_ERR("sessionless crypto op not supported");
1241                 return 0;
1242         }
1243         /*Prepare enqueue descriptor*/
1244         qbman_eq_desc_clear(&eqdesc);
1245         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1246         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1247         qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1248
1249         if (!DPAA2_PER_LCORE_DPIO) {
1250                 ret = dpaa2_affine_qbman_swp();
1251                 if (ret) {
1252                         DPAA2_SEC_ERR("Failure in affining portal");
1253                         return 0;
1254                 }
1255         }
1256         swp = DPAA2_PER_LCORE_PORTAL;
1257
1258         while (nb_ops) {
1259                 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1260                         dpaa2_eqcr_size : nb_ops;
1261
1262                 for (loop = 0; loop < frames_to_send; loop++) {
1263                         if ((*ops)->sym->m_src->seqn) {
1264                          uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
1265
1266                          flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
1267                          DPAA2_PER_LCORE_DQRR_SIZE--;
1268                          DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1269                          (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
1270                         }
1271
1272                         /*Clear the unused FD fields before sending*/
1273                         memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1274                         mb_pool = (*ops)->sym->m_src->pool;
1275                         bpid = mempool_to_bpid(mb_pool);
1276                         ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1277                         if (ret) {
1278                                 DPAA2_SEC_ERR("error: Improper packet contents"
1279                                               " for crypto operation");
1280                                 goto skip_tx;
1281                         }
1282                         ops++;
1283                 }
1284                 loop = 0;
1285                 while (loop < frames_to_send) {
1286                         loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
1287                                                         &fd_arr[loop],
1288                                                         &flags[loop],
1289                                                         frames_to_send - loop);
1290                 }
1291
1292                 num_tx += frames_to_send;
1293                 nb_ops -= frames_to_send;
1294         }
1295 skip_tx:
1296         dpaa2_qp->tx_vq.tx_pkts += num_tx;
1297         dpaa2_qp->tx_vq.err_pkts += nb_ops;
1298         return num_tx;
1299 }
1300
/* Convert a simple-format FD (protocol-offload path) back to its crypto op.
 *
 * NOTE(review): relies on the enqueue-side FD builder having stashed the
 * op pointer in mbuf->buf_iova and parked the original buf_iova in
 * op->sym->aead.digest.phys_addr -- confirm against the build_proto_*
 * builders, which are outside this view.
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	/* Recover the inline mbuf that precedes the FD's data buffer */
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Adjust the mbuf to the length reported by SEC (diff may be
	 * "negative" and wrap; uint16_t arithmetic still yields the
	 * correct adjusted lengths).
	 */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Restore the op pointer and the mbuf's real buf_iova */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	/* Compensate the data offset for the data-head-room shift applied
	 * on the protocol path (SEC_FLC_DHR_OUTBOUND is negative).
	 */
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
1329
/* Convert a completed FD back to its originating crypto op.
 *
 * Simple-format FDs take the protocol-offload path; compound FDs recover
 * the op pointer and ctxt stashed in the FLE entry just before the one
 * the FD points at (fle - 1), then release the FLE memory to its pool
 * (contiguous case) or the heap (scatter-gather case).
 *
 * Returns NULL for non-inline (IVP) buffers, which are not supported.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		/* NOTE(review): callers index into ops[] with this return
		 * value; a NULL here would be dereferenced in the dequeue
		 * loop -- worth hardening.
		 */
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	/* For IPSEC protocol offload the FD length is the new packet
	 * length; propagate it to the mbuf.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		dpaa2_sec_session *sess = (dpaa2_sec_session *)
			get_sec_session_private_data(op->sym->sec_session);
		if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
			uint16_t len = DPAA2_GET_FD_LEN(fd);
			dst->pkt_len = len;
			dst->data_len = len;
		}
	}

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: pool allocation for contiguous mbufs
	 * (build_*_fd), heap allocation for SG mbufs (build_*_sg_fd)
	 */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1399
/* Dequeue up to nb_ops completed crypto ops from the SEC rx queue.
 *
 * Issues a single QBMAN volatile-dequeue (pull) command for at most
 * dpaa2_dqrr_size frames, then drains the returned DQ storage entries,
 * converting each FD back to its crypto op and setting the op status
 * from the FD's frame-result code.
 *
 * @return number of ops written into ops[]
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		/* Non-zero FRC means SEC reported an error for this frame */
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
1496
1497 /** Release queue pair */
1498 static int
1499 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1500 {
1501         struct dpaa2_sec_qp *qp =
1502                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1503
1504         PMD_INIT_FUNC_TRACE();
1505
1506         if (qp->rx_vq.q_storage) {
1507                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1508                 rte_free(qp->rx_vq.q_storage);
1509         }
1510         rte_free(qp);
1511
1512         dev->data->queue_pairs[queue_pair_id] = NULL;
1513
1514         return 0;
1515 }
1516
1517 /** Setup a queue pair */
1518 static int
1519 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1520                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1521                 __rte_unused int socket_id)
1522 {
1523         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1524         struct dpaa2_sec_qp *qp;
1525         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1526         struct dpseci_rx_queue_cfg cfg;
1527         int32_t retcode;
1528
1529         PMD_INIT_FUNC_TRACE();
1530
1531         /* If qp is already in use free ring memory and qp metadata. */
1532         if (dev->data->queue_pairs[qp_id] != NULL) {
1533                 DPAA2_SEC_INFO("QP already setup");
1534                 return 0;
1535         }
1536
1537         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1538                     dev, qp_id, qp_conf);
1539
1540         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1541
1542         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1543                         RTE_CACHE_LINE_SIZE);
1544         if (!qp) {
1545                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1546                 return -1;
1547         }
1548
1549         qp->rx_vq.crypto_data = dev->data;
1550         qp->tx_vq.crypto_data = dev->data;
1551         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1552                 sizeof(struct queue_storage_info_t),
1553                 RTE_CACHE_LINE_SIZE);
1554         if (!qp->rx_vq.q_storage) {
1555                 DPAA2_SEC_ERR("malloc failed for q_storage");
1556                 return -1;
1557         }
1558         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1559
1560         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1561                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
1562                 return -1;
1563         }
1564
1565         dev->data->queue_pairs[qp_id] = qp;
1566
1567         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1568         cfg.user_ctx = (size_t)(&qp->rx_vq);
1569         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1570                                       qp_id, &cfg);
1571         return retcode;
1572 }
1573
1574 /** Return the number of allocated queue pairs */
1575 static uint32_t
1576 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
1577 {
1578         PMD_INIT_FUNC_TRACE();
1579
1580         return dev->data->nb_queue_pairs;
1581 }
1582
/** Returns the size of the dpaa2_sec session structure.
 * (The earlier "aesni gcm" wording was a copy-paste leftover from another
 * PMD; this driver allocates dpaa2_sec_session objects.)
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return sizeof(dpaa2_sec_session);
}
1591
1592 static int
1593 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1594                       struct rte_crypto_sym_xform *xform,
1595                       dpaa2_sec_session *session)
1596 {
1597         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1598         struct alginfo cipherdata;
1599         int bufsize, i;
1600         struct ctxt_priv *priv;
1601         struct sec_flow_context *flc;
1602
1603         PMD_INIT_FUNC_TRACE();
1604
1605         /* For SEC CIPHER only one descriptor is required. */
1606         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1607                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1608                         RTE_CACHE_LINE_SIZE);
1609         if (priv == NULL) {
1610                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1611                 return -1;
1612         }
1613
1614         priv->fle_pool = dev_priv->fle_pool;
1615
1616         flc = &priv->flc_desc[0].flc;
1617
1618         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1619                         RTE_CACHE_LINE_SIZE);
1620         if (session->cipher_key.data == NULL) {
1621                 DPAA2_SEC_ERR("No Memory for cipher key");
1622                 rte_free(priv);
1623                 return -1;
1624         }
1625         session->cipher_key.length = xform->cipher.key.length;
1626
1627         memcpy(session->cipher_key.data, xform->cipher.key.data,
1628                xform->cipher.key.length);
1629         cipherdata.key = (size_t)session->cipher_key.data;
1630         cipherdata.keylen = session->cipher_key.length;
1631         cipherdata.key_enc_flags = 0;
1632         cipherdata.key_type = RTA_DATA_IMM;
1633
1634         /* Set IV parameters */
1635         session->iv.offset = xform->cipher.iv.offset;
1636         session->iv.length = xform->cipher.iv.length;
1637
1638         switch (xform->cipher.algo) {
1639         case RTE_CRYPTO_CIPHER_AES_CBC:
1640                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1641                 cipherdata.algmode = OP_ALG_AAI_CBC;
1642                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1643                 break;
1644         case RTE_CRYPTO_CIPHER_3DES_CBC:
1645                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1646                 cipherdata.algmode = OP_ALG_AAI_CBC;
1647                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1648                 break;
1649         case RTE_CRYPTO_CIPHER_AES_CTR:
1650                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
1651                 cipherdata.algmode = OP_ALG_AAI_CTR;
1652                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1653                 break;
1654         case RTE_CRYPTO_CIPHER_3DES_CTR:
1655         case RTE_CRYPTO_CIPHER_AES_ECB:
1656         case RTE_CRYPTO_CIPHER_3DES_ECB:
1657         case RTE_CRYPTO_CIPHER_AES_XTS:
1658         case RTE_CRYPTO_CIPHER_AES_F8:
1659         case RTE_CRYPTO_CIPHER_ARC4:
1660         case RTE_CRYPTO_CIPHER_KASUMI_F8:
1661         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1662         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1663         case RTE_CRYPTO_CIPHER_NULL:
1664                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1665                         xform->cipher.algo);
1666                 goto error_out;
1667         default:
1668                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1669                         xform->cipher.algo);
1670                 goto error_out;
1671         }
1672         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1673                                 DIR_ENC : DIR_DEC;
1674
1675         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1676                                         &cipherdata, NULL, session->iv.length,
1677                                         session->dir);
1678         if (bufsize < 0) {
1679                 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1680                 goto error_out;
1681         }
1682
1683         flc->word1_sdl = (uint8_t)bufsize;
1684         session->ctxt = priv;
1685
1686         for (i = 0; i < bufsize; i++)
1687                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1688
1689         return 0;
1690
1691 error_out:
1692         rte_free(session->cipher_key.data);
1693         rte_free(priv);
1694         return -1;
1695 }
1696
1697 static int
1698 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1699                     struct rte_crypto_sym_xform *xform,
1700                     dpaa2_sec_session *session)
1701 {
1702         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1703         struct alginfo authdata;
1704         int bufsize, i;
1705         struct ctxt_priv *priv;
1706         struct sec_flow_context *flc;
1707
1708         PMD_INIT_FUNC_TRACE();
1709
1710         /* For SEC AUTH three descriptors are required for various stages */
1711         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1712                         sizeof(struct ctxt_priv) + 3 *
1713                         sizeof(struct sec_flc_desc),
1714                         RTE_CACHE_LINE_SIZE);
1715         if (priv == NULL) {
1716                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1717                 return -1;
1718         }
1719
1720         priv->fle_pool = dev_priv->fle_pool;
1721         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1722
1723         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1724                         RTE_CACHE_LINE_SIZE);
1725         if (session->auth_key.data == NULL) {
1726                 DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1727                 rte_free(priv);
1728                 return -1;
1729         }
1730         session->auth_key.length = xform->auth.key.length;
1731
1732         memcpy(session->auth_key.data, xform->auth.key.data,
1733                xform->auth.key.length);
1734         authdata.key = (size_t)session->auth_key.data;
1735         authdata.keylen = session->auth_key.length;
1736         authdata.key_enc_flags = 0;
1737         authdata.key_type = RTA_DATA_IMM;
1738
1739         session->digest_length = xform->auth.digest_length;
1740
1741         switch (xform->auth.algo) {
1742         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1743                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1744                 authdata.algmode = OP_ALG_AAI_HMAC;
1745                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1746                 break;
1747         case RTE_CRYPTO_AUTH_MD5_HMAC:
1748                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1749                 authdata.algmode = OP_ALG_AAI_HMAC;
1750                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1751                 break;
1752         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1753                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1754                 authdata.algmode = OP_ALG_AAI_HMAC;
1755                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1756                 break;
1757         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1758                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1759                 authdata.algmode = OP_ALG_AAI_HMAC;
1760                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1761                 break;
1762         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1763                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1764                 authdata.algmode = OP_ALG_AAI_HMAC;
1765                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1766                 break;
1767         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1768                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1769                 authdata.algmode = OP_ALG_AAI_HMAC;
1770                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1771                 break;
1772         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1773         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1774         case RTE_CRYPTO_AUTH_NULL:
1775         case RTE_CRYPTO_AUTH_SHA1:
1776         case RTE_CRYPTO_AUTH_SHA256:
1777         case RTE_CRYPTO_AUTH_SHA512:
1778         case RTE_CRYPTO_AUTH_SHA224:
1779         case RTE_CRYPTO_AUTH_SHA384:
1780         case RTE_CRYPTO_AUTH_MD5:
1781         case RTE_CRYPTO_AUTH_AES_GMAC:
1782         case RTE_CRYPTO_AUTH_KASUMI_F9:
1783         case RTE_CRYPTO_AUTH_AES_CMAC:
1784         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1785         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1786                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
1787                               xform->auth.algo);
1788                 goto error_out;
1789         default:
1790                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1791                               xform->auth.algo);
1792                 goto error_out;
1793         }
1794         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1795                                 DIR_ENC : DIR_DEC;
1796
1797         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1798                                    1, 0, SHR_NEVER, &authdata, !session->dir,
1799                                    session->digest_length);
1800         if (bufsize < 0) {
1801                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1802                 goto error_out;
1803         }
1804
1805         flc->word1_sdl = (uint8_t)bufsize;
1806         session->ctxt = priv;
1807         for (i = 0; i < bufsize; i++)
1808                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1809                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1810
1811
1812         return 0;
1813
1814 error_out:
1815         rte_free(session->auth_key.data);
1816         rte_free(priv);
1817         return -1;
1818 }
1819
1820 static int
1821 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1822                     struct rte_crypto_sym_xform *xform,
1823                     dpaa2_sec_session *session)
1824 {
1825         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1826         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1827         struct alginfo aeaddata;
1828         int bufsize, i;
1829         struct ctxt_priv *priv;
1830         struct sec_flow_context *flc;
1831         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1832         int err;
1833
1834         PMD_INIT_FUNC_TRACE();
1835
1836         /* Set IV parameters */
1837         session->iv.offset = aead_xform->iv.offset;
1838         session->iv.length = aead_xform->iv.length;
1839         session->ctxt_type = DPAA2_SEC_AEAD;
1840
1841         /* For SEC AEAD only one descriptor is required */
1842         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1843                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1844                         RTE_CACHE_LINE_SIZE);
1845         if (priv == NULL) {
1846                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1847                 return -1;
1848         }
1849
1850         priv->fle_pool = dev_priv->fle_pool;
1851         flc = &priv->flc_desc[0].flc;
1852
1853         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1854                                                RTE_CACHE_LINE_SIZE);
1855         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1856                 DPAA2_SEC_ERR("No Memory for aead key");
1857                 rte_free(priv);
1858                 return -1;
1859         }
1860         memcpy(session->aead_key.data, aead_xform->key.data,
1861                aead_xform->key.length);
1862
1863         session->digest_length = aead_xform->digest_length;
1864         session->aead_key.length = aead_xform->key.length;
1865         ctxt->auth_only_len = aead_xform->aad_length;
1866
1867         aeaddata.key = (size_t)session->aead_key.data;
1868         aeaddata.keylen = session->aead_key.length;
1869         aeaddata.key_enc_flags = 0;
1870         aeaddata.key_type = RTA_DATA_IMM;
1871
1872         switch (aead_xform->algo) {
1873         case RTE_CRYPTO_AEAD_AES_GCM:
1874                 aeaddata.algtype = OP_ALG_ALGSEL_AES;
1875                 aeaddata.algmode = OP_ALG_AAI_GCM;
1876                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1877                 break;
1878         case RTE_CRYPTO_AEAD_AES_CCM:
1879                 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1880                               aead_xform->algo);
1881                 goto error_out;
1882         default:
1883                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1884                               aead_xform->algo);
1885                 goto error_out;
1886         }
1887         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1888                                 DIR_ENC : DIR_DEC;
1889
1890         priv->flc_desc[0].desc[0] = aeaddata.keylen;
1891         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1892                                MIN_JOB_DESC_SIZE,
1893                                (unsigned int *)priv->flc_desc[0].desc,
1894                                &priv->flc_desc[0].desc[1], 1);
1895
1896         if (err < 0) {
1897                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1898                 goto error_out;
1899         }
1900         if (priv->flc_desc[0].desc[1] & 1) {
1901                 aeaddata.key_type = RTA_DATA_IMM;
1902         } else {
1903                 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1904                 aeaddata.key_type = RTA_DATA_PTR;
1905         }
1906         priv->flc_desc[0].desc[0] = 0;
1907         priv->flc_desc[0].desc[1] = 0;
1908
1909         if (session->dir == DIR_ENC)
1910                 bufsize = cnstr_shdsc_gcm_encap(
1911                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1912                                 &aeaddata, session->iv.length,
1913                                 session->digest_length);
1914         else
1915                 bufsize = cnstr_shdsc_gcm_decap(
1916                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
1917                                 &aeaddata, session->iv.length,
1918                                 session->digest_length);
1919         if (bufsize < 0) {
1920                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1921                 goto error_out;
1922         }
1923
1924         flc->word1_sdl = (uint8_t)bufsize;
1925         session->ctxt = priv;
1926         for (i = 0; i < bufsize; i++)
1927                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1928                             i, priv->flc_desc[0].desc[i]);
1929
1930         return 0;
1931
1932 error_out:
1933         rte_free(session->aead_key.data);
1934         rte_free(priv);
1935         return -1;
1936 }
1937
1938
1939 static int
1940 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1941                     struct rte_crypto_sym_xform *xform,
1942                     dpaa2_sec_session *session)
1943 {
1944         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1945         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1946         struct alginfo authdata, cipherdata;
1947         int bufsize, i;
1948         struct ctxt_priv *priv;
1949         struct sec_flow_context *flc;
1950         struct rte_crypto_cipher_xform *cipher_xform;
1951         struct rte_crypto_auth_xform *auth_xform;
1952         int err;
1953
1954         PMD_INIT_FUNC_TRACE();
1955
1956         if (session->ext_params.aead_ctxt.auth_cipher_text) {
1957                 cipher_xform = &xform->cipher;
1958                 auth_xform = &xform->next->auth;
1959                 session->ctxt_type =
1960                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1961                         DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1962         } else {
1963                 cipher_xform = &xform->next->cipher;
1964                 auth_xform = &xform->auth;
1965                 session->ctxt_type =
1966                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1967                         DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1968         }
1969
1970         /* Set IV parameters */
1971         session->iv.offset = cipher_xform->iv.offset;
1972         session->iv.length = cipher_xform->iv.length;
1973
1974         /* For SEC AEAD only one descriptor is required */
1975         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1976                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1977                         RTE_CACHE_LINE_SIZE);
1978         if (priv == NULL) {
1979                 DPAA2_SEC_ERR("No Memory for priv CTXT");
1980                 return -1;
1981         }
1982
1983         priv->fle_pool = dev_priv->fle_pool;
1984         flc = &priv->flc_desc[0].flc;
1985
1986         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1987                                                RTE_CACHE_LINE_SIZE);
1988         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1989                 DPAA2_SEC_ERR("No Memory for cipher key");
1990                 rte_free(priv);
1991                 return -1;
1992         }
1993         session->cipher_key.length = cipher_xform->key.length;
1994         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1995                                              RTE_CACHE_LINE_SIZE);
1996         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1997                 DPAA2_SEC_ERR("No Memory for auth key");
1998                 rte_free(session->cipher_key.data);
1999                 rte_free(priv);
2000                 return -1;
2001         }
2002         session->auth_key.length = auth_xform->key.length;
2003         memcpy(session->cipher_key.data, cipher_xform->key.data,
2004                cipher_xform->key.length);
2005         memcpy(session->auth_key.data, auth_xform->key.data,
2006                auth_xform->key.length);
2007
2008         authdata.key = (size_t)session->auth_key.data;
2009         authdata.keylen = session->auth_key.length;
2010         authdata.key_enc_flags = 0;
2011         authdata.key_type = RTA_DATA_IMM;
2012
2013         session->digest_length = auth_xform->digest_length;
2014
2015         switch (auth_xform->algo) {
2016         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2017                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2018                 authdata.algmode = OP_ALG_AAI_HMAC;
2019                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2020                 break;
2021         case RTE_CRYPTO_AUTH_MD5_HMAC:
2022                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2023                 authdata.algmode = OP_ALG_AAI_HMAC;
2024                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2025                 break;
2026         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2027                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2028                 authdata.algmode = OP_ALG_AAI_HMAC;
2029                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2030                 break;
2031         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2032                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2033                 authdata.algmode = OP_ALG_AAI_HMAC;
2034                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2035                 break;
2036         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2037                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2038                 authdata.algmode = OP_ALG_AAI_HMAC;
2039                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2040                 break;
2041         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2042                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2043                 authdata.algmode = OP_ALG_AAI_HMAC;
2044                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2045                 break;
2046         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2047         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2048         case RTE_CRYPTO_AUTH_NULL:
2049         case RTE_CRYPTO_AUTH_SHA1:
2050         case RTE_CRYPTO_AUTH_SHA256:
2051         case RTE_CRYPTO_AUTH_SHA512:
2052         case RTE_CRYPTO_AUTH_SHA224:
2053         case RTE_CRYPTO_AUTH_SHA384:
2054         case RTE_CRYPTO_AUTH_MD5:
2055         case RTE_CRYPTO_AUTH_AES_GMAC:
2056         case RTE_CRYPTO_AUTH_KASUMI_F9:
2057         case RTE_CRYPTO_AUTH_AES_CMAC:
2058         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2059         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2060                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2061                               auth_xform->algo);
2062                 goto error_out;
2063         default:
2064                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2065                               auth_xform->algo);
2066                 goto error_out;
2067         }
2068         cipherdata.key = (size_t)session->cipher_key.data;
2069         cipherdata.keylen = session->cipher_key.length;
2070         cipherdata.key_enc_flags = 0;
2071         cipherdata.key_type = RTA_DATA_IMM;
2072
2073         switch (cipher_xform->algo) {
2074         case RTE_CRYPTO_CIPHER_AES_CBC:
2075                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2076                 cipherdata.algmode = OP_ALG_AAI_CBC;
2077                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2078                 break;
2079         case RTE_CRYPTO_CIPHER_3DES_CBC:
2080                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2081                 cipherdata.algmode = OP_ALG_AAI_CBC;
2082                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2083                 break;
2084         case RTE_CRYPTO_CIPHER_AES_CTR:
2085                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2086                 cipherdata.algmode = OP_ALG_AAI_CTR;
2087                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2088                 break;
2089         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2090         case RTE_CRYPTO_CIPHER_NULL:
2091         case RTE_CRYPTO_CIPHER_3DES_ECB:
2092         case RTE_CRYPTO_CIPHER_AES_ECB:
2093         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2094                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2095                               cipher_xform->algo);
2096                 goto error_out;
2097         default:
2098                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2099                               cipher_xform->algo);
2100                 goto error_out;
2101         }
2102         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2103                                 DIR_ENC : DIR_DEC;
2104
2105         priv->flc_desc[0].desc[0] = cipherdata.keylen;
2106         priv->flc_desc[0].desc[1] = authdata.keylen;
2107         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2108                                MIN_JOB_DESC_SIZE,
2109                                (unsigned int *)priv->flc_desc[0].desc,
2110                                &priv->flc_desc[0].desc[2], 2);
2111
2112         if (err < 0) {
2113                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2114                 goto error_out;
2115         }
2116         if (priv->flc_desc[0].desc[2] & 1) {
2117                 cipherdata.key_type = RTA_DATA_IMM;
2118         } else {
2119                 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2120                 cipherdata.key_type = RTA_DATA_PTR;
2121         }
2122         if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2123                 authdata.key_type = RTA_DATA_IMM;
2124         } else {
2125                 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2126                 authdata.key_type = RTA_DATA_PTR;
2127         }
2128         priv->flc_desc[0].desc[0] = 0;
2129         priv->flc_desc[0].desc[1] = 0;
2130         priv->flc_desc[0].desc[2] = 0;
2131
2132         if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2133                 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2134                                               0, SHR_SERIAL,
2135                                               &cipherdata, &authdata,
2136                                               session->iv.length,
2137                                               ctxt->auth_only_len,
2138                                               session->digest_length,
2139                                               session->dir);
2140                 if (bufsize < 0) {
2141                         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2142                         goto error_out;
2143                 }
2144         } else {
2145                 DPAA2_SEC_ERR("Hash before cipher not supported");
2146                 goto error_out;
2147         }
2148
2149         flc->word1_sdl = (uint8_t)bufsize;
2150         session->ctxt = priv;
2151         for (i = 0; i < bufsize; i++)
2152                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2153                             i, priv->flc_desc[0].desc[i]);
2154
2155         return 0;
2156
2157 error_out:
2158         rte_free(session->cipher_key.data);
2159         rte_free(session->auth_key.data);
2160         rte_free(priv);
2161         return -1;
2162 }
2163
2164 static int
2165 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2166                             struct rte_crypto_sym_xform *xform, void *sess)
2167 {
2168         dpaa2_sec_session *session = sess;
2169         int ret;
2170
2171         PMD_INIT_FUNC_TRACE();
2172
2173         if (unlikely(sess == NULL)) {
2174                 DPAA2_SEC_ERR("Invalid session struct");
2175                 return -1;
2176         }
2177
2178         memset(session, 0, sizeof(dpaa2_sec_session));
2179         /* Default IV length = 0 */
2180         session->iv.length = 0;
2181
2182         /* Cipher Only */
2183         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2184                 session->ctxt_type = DPAA2_SEC_CIPHER;
2185                 ret = dpaa2_sec_cipher_init(dev, xform, session);
2186
2187         /* Authentication Only */
2188         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2189                    xform->next == NULL) {
2190                 session->ctxt_type = DPAA2_SEC_AUTH;
2191                 ret = dpaa2_sec_auth_init(dev, xform, session);
2192
2193         /* Cipher then Authenticate */
2194         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2195                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2196                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2197                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2198
2199         /* Authenticate then Cipher */
2200         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2201                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2202                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2203                 ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2204
2205         /* AEAD operation for AES-GCM kind of Algorithms */
2206         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2207                    xform->next == NULL) {
2208                 ret = dpaa2_sec_aead_init(dev, xform, session);
2209
2210         } else {
2211                 DPAA2_SEC_ERR("Invalid crypto type");
2212                 return -EINVAL;
2213         }
2214
2215         return ret;
2216 }
2217
2218 static int
2219 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2220                         dpaa2_sec_session *session,
2221                         struct alginfo *aeaddata)
2222 {
2223         PMD_INIT_FUNC_TRACE();
2224
2225         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2226                                                RTE_CACHE_LINE_SIZE);
2227         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2228                 DPAA2_SEC_ERR("No Memory for aead key");
2229                 return -1;
2230         }
2231         memcpy(session->aead_key.data, aead_xform->key.data,
2232                aead_xform->key.length);
2233
2234         session->digest_length = aead_xform->digest_length;
2235         session->aead_key.length = aead_xform->key.length;
2236
2237         aeaddata->key = (size_t)session->aead_key.data;
2238         aeaddata->keylen = session->aead_key.length;
2239         aeaddata->key_enc_flags = 0;
2240         aeaddata->key_type = RTA_DATA_IMM;
2241
2242         switch (aead_xform->algo) {
2243         case RTE_CRYPTO_AEAD_AES_GCM:
2244                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2245                 aeaddata->algmode = OP_ALG_AAI_GCM;
2246                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2247                 break;
2248         case RTE_CRYPTO_AEAD_AES_CCM:
2249                 aeaddata->algtype = OP_ALG_ALGSEL_AES;
2250                 aeaddata->algmode = OP_ALG_AAI_CCM;
2251                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2252                 break;
2253         default:
2254                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2255                               aead_xform->algo);
2256                 return -1;
2257         }
2258         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2259                                 DIR_ENC : DIR_DEC;
2260
2261         return 0;
2262 }
2263
2264 static int
2265 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2266         struct rte_crypto_auth_xform *auth_xform,
2267         dpaa2_sec_session *session,
2268         struct alginfo *cipherdata,
2269         struct alginfo *authdata)
2270 {
2271         if (cipher_xform) {
2272                 session->cipher_key.data = rte_zmalloc(NULL,
2273                                                        cipher_xform->key.length,
2274                                                        RTE_CACHE_LINE_SIZE);
2275                 if (session->cipher_key.data == NULL &&
2276                                 cipher_xform->key.length > 0) {
2277                         DPAA2_SEC_ERR("No Memory for cipher key");
2278                         return -ENOMEM;
2279                 }
2280
2281                 session->cipher_key.length = cipher_xform->key.length;
2282                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2283                                 cipher_xform->key.length);
2284                 session->cipher_alg = cipher_xform->algo;
2285         } else {
2286                 session->cipher_key.data = NULL;
2287                 session->cipher_key.length = 0;
2288                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2289         }
2290
2291         if (auth_xform) {
2292                 session->auth_key.data = rte_zmalloc(NULL,
2293                                                 auth_xform->key.length,
2294                                                 RTE_CACHE_LINE_SIZE);
2295                 if (session->auth_key.data == NULL &&
2296                                 auth_xform->key.length > 0) {
2297                         DPAA2_SEC_ERR("No Memory for auth key");
2298                         return -ENOMEM;
2299                 }
2300                 session->auth_key.length = auth_xform->key.length;
2301                 memcpy(session->auth_key.data, auth_xform->key.data,
2302                                 auth_xform->key.length);
2303                 session->auth_alg = auth_xform->algo;
2304         } else {
2305                 session->auth_key.data = NULL;
2306                 session->auth_key.length = 0;
2307                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2308         }
2309
2310         authdata->key = (size_t)session->auth_key.data;
2311         authdata->keylen = session->auth_key.length;
2312         authdata->key_enc_flags = 0;
2313         authdata->key_type = RTA_DATA_IMM;
2314         switch (session->auth_alg) {
2315         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2316                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2317                 authdata->algmode = OP_ALG_AAI_HMAC;
2318                 break;
2319         case RTE_CRYPTO_AUTH_MD5_HMAC:
2320                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2321                 authdata->algmode = OP_ALG_AAI_HMAC;
2322                 break;
2323         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2324                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2325                 authdata->algmode = OP_ALG_AAI_HMAC;
2326                 break;
2327         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2328                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2329                 authdata->algmode = OP_ALG_AAI_HMAC;
2330                 break;
2331         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2332                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2333                 authdata->algmode = OP_ALG_AAI_HMAC;
2334                 break;
2335         case RTE_CRYPTO_AUTH_AES_CMAC:
2336                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2337                 break;
2338         case RTE_CRYPTO_AUTH_NULL:
2339                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2340                 break;
2341         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2342         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2343         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2344         case RTE_CRYPTO_AUTH_SHA1:
2345         case RTE_CRYPTO_AUTH_SHA256:
2346         case RTE_CRYPTO_AUTH_SHA512:
2347         case RTE_CRYPTO_AUTH_SHA224:
2348         case RTE_CRYPTO_AUTH_SHA384:
2349         case RTE_CRYPTO_AUTH_MD5:
2350         case RTE_CRYPTO_AUTH_AES_GMAC:
2351         case RTE_CRYPTO_AUTH_KASUMI_F9:
2352         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2353         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2354                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2355                               session->auth_alg);
2356                 return -1;
2357         default:
2358                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2359                               session->auth_alg);
2360                 return -1;
2361         }
2362         cipherdata->key = (size_t)session->cipher_key.data;
2363         cipherdata->keylen = session->cipher_key.length;
2364         cipherdata->key_enc_flags = 0;
2365         cipherdata->key_type = RTA_DATA_IMM;
2366
2367         switch (session->cipher_alg) {
2368         case RTE_CRYPTO_CIPHER_AES_CBC:
2369                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2370                 cipherdata->algmode = OP_ALG_AAI_CBC;
2371                 break;
2372         case RTE_CRYPTO_CIPHER_3DES_CBC:
2373                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
2374                 cipherdata->algmode = OP_ALG_AAI_CBC;
2375                 break;
2376         case RTE_CRYPTO_CIPHER_AES_CTR:
2377                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2378                 cipherdata->algmode = OP_ALG_AAI_CTR;
2379                 break;
2380         case RTE_CRYPTO_CIPHER_NULL:
2381                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
2382                 break;
2383         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2384         case RTE_CRYPTO_CIPHER_3DES_ECB:
2385         case RTE_CRYPTO_CIPHER_AES_ECB:
2386         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2387                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2388                               session->cipher_alg);
2389                 return -1;
2390         default:
2391                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2392                               session->cipher_alg);
2393                 return -1;
2394         }
2395
2396         return 0;
2397 }
2398
#ifdef RTE_LIBRTE_SECURITY_TEST
/* Fixed 16-byte AES-CBC IV (0x00..0x0f), compiled in only for the
 * RTE_LIBRTE_SECURITY_TEST build; presumably consumed by the security
 * self-test path elsewhere in the file — TODO confirm its user.
 */
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
2404
2405 static int
2406 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2407                             struct rte_security_session_conf *conf,
2408                             void *sess)
2409 {
2410         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2411         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2412         struct rte_crypto_auth_xform *auth_xform = NULL;
2413         struct rte_crypto_aead_xform *aead_xform = NULL;
2414         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2415         struct ctxt_priv *priv;
2416         struct ipsec_encap_pdb encap_pdb;
2417         struct ipsec_decap_pdb decap_pdb;
2418         struct alginfo authdata, cipherdata;
2419         int bufsize;
2420         struct sec_flow_context *flc;
2421         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2422         int ret = -1;
2423
2424         PMD_INIT_FUNC_TRACE();
2425
2426         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2427                                 sizeof(struct ctxt_priv) +
2428                                 sizeof(struct sec_flc_desc),
2429                                 RTE_CACHE_LINE_SIZE);
2430
2431         if (priv == NULL) {
2432                 DPAA2_SEC_ERR("No memory for priv CTXT");
2433                 return -ENOMEM;
2434         }
2435
2436         priv->fle_pool = dev_priv->fle_pool;
2437         flc = &priv->flc_desc[0].flc;
2438
2439         memset(session, 0, sizeof(dpaa2_sec_session));
2440
2441         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2442                 cipher_xform = &conf->crypto_xform->cipher;
2443                 if (conf->crypto_xform->next)
2444                         auth_xform = &conf->crypto_xform->next->auth;
2445                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2446                                         session, &cipherdata, &authdata);
2447         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2448                 auth_xform = &conf->crypto_xform->auth;
2449                 if (conf->crypto_xform->next)
2450                         cipher_xform = &conf->crypto_xform->next->cipher;
2451                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2452                                         session, &cipherdata, &authdata);
2453         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2454                 aead_xform = &conf->crypto_xform->aead;
2455                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2456                                         session, &cipherdata);
2457         } else {
2458                 DPAA2_SEC_ERR("XFORM not specified");
2459                 ret = -EINVAL;
2460                 goto out;
2461         }
2462         if (ret) {
2463                 DPAA2_SEC_ERR("Failed to process xform");
2464                 goto out;
2465         }
2466
2467         session->ctxt_type = DPAA2_SEC_IPSEC;
2468         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2469                 uint8_t *hdr = NULL;
2470                 struct ip ip4_hdr;
2471                 struct rte_ipv6_hdr ip6_hdr;
2472
2473                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
2474                 /* For Sec Proto only one descriptor is required. */
2475                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2476                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2477                         PDBOPTS_ESP_OIHI_PDB_INL |
2478                         PDBOPTS_ESP_IVSRC |
2479                         PDBHMO_ESP_ENCAP_DTTL |
2480                         PDBHMO_ESP_SNR;
2481                 if (ipsec_xform->options.esn)
2482                         encap_pdb.options |= PDBOPTS_ESP_ESN;
2483                 encap_pdb.spi = ipsec_xform->spi;
2484                 session->dir = DIR_ENC;
2485                 if (ipsec_xform->tunnel.type ==
2486                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2487                         encap_pdb.ip_hdr_len = sizeof(struct ip);
2488                         ip4_hdr.ip_v = IPVERSION;
2489                         ip4_hdr.ip_hl = 5;
2490                         ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2491                         ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2492                         ip4_hdr.ip_id = 0;
2493                         ip4_hdr.ip_off = 0;
2494                         ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2495                         ip4_hdr.ip_p = IPPROTO_ESP;
2496                         ip4_hdr.ip_sum = 0;
2497                         ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2498                         ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2499                         ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2500                                         &ip4_hdr, sizeof(struct ip));
2501                         hdr = (uint8_t *)&ip4_hdr;
2502                 } else if (ipsec_xform->tunnel.type ==
2503                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2504                         ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2505                                 DPAA2_IPv6_DEFAULT_VTC_FLOW |
2506                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2507                                         RTE_IPV6_HDR_TC_SHIFT) &
2508                                         RTE_IPV6_HDR_TC_MASK) |
2509                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2510                                         RTE_IPV6_HDR_FL_SHIFT) &
2511                                         RTE_IPV6_HDR_FL_MASK));
2512                         /* Payload length will be updated by HW */
2513                         ip6_hdr.payload_len = 0;
2514                         ip6_hdr.hop_limits =
2515                                         ipsec_xform->tunnel.ipv6.hlimit;
2516                         ip6_hdr.proto = (ipsec_xform->proto ==
2517                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2518                                         IPPROTO_ESP : IPPROTO_AH;
2519                         memcpy(&ip6_hdr.src_addr,
2520                                 &ipsec_xform->tunnel.ipv6.src_addr, 16);
2521                         memcpy(&ip6_hdr.dst_addr,
2522                                 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2523                         encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2524                         hdr = (uint8_t *)&ip6_hdr;
2525                 }
2526
2527                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2528                                 1, 0, SHR_SERIAL, &encap_pdb,
2529                                 hdr, &cipherdata, &authdata);
2530         } else if (ipsec_xform->direction ==
2531                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2532                 flc->dhr = SEC_FLC_DHR_INBOUND;
2533                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2534                 decap_pdb.options = sizeof(struct ip) << 16;
2535                 if (ipsec_xform->options.esn)
2536                         decap_pdb.options |= PDBOPTS_ESP_ESN;
2537                 decap_pdb.options = (ipsec_xform->tunnel.type ==
2538                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2539                                 sizeof(struct ip) << 16 :
2540                                 sizeof(struct rte_ipv6_hdr) << 16;
2541                 session->dir = DIR_DEC;
2542                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2543                                 1, 0, SHR_SERIAL,
2544                                 &decap_pdb, &cipherdata, &authdata);
2545         } else
2546                 goto out;
2547
2548         if (bufsize < 0) {
2549                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2550                 goto out;
2551         }
2552
2553         flc->word1_sdl = (uint8_t)bufsize;
2554
2555         /* Enable the stashing control bit */
2556         DPAA2_SET_FLC_RSC(flc);
2557         flc->word2_rflc_31_0 = lower_32_bits(
2558                         (size_t)&(((struct dpaa2_sec_qp *)
2559                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2560         flc->word3_rflc_63_32 = upper_32_bits(
2561                         (size_t)&(((struct dpaa2_sec_qp *)
2562                         dev->data->queue_pairs[0])->rx_vq));
2563
2564         /* Set EWS bit i.e. enable write-safe */
2565         DPAA2_SET_FLC_EWS(flc);
2566         /* Set BS = 1 i.e reuse input buffers as output buffers */
2567         DPAA2_SET_FLC_REUSE_BS(flc);
2568         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2569         DPAA2_SET_FLC_REUSE_FF(flc);
2570
2571         session->ctxt = priv;
2572
2573         return 0;
2574 out:
2575         rte_free(session->auth_key.data);
2576         rte_free(session->cipher_key.data);
2577         rte_free(priv);
2578         return ret;
2579 }
2580
2581 static int
2582 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
2583                            struct rte_security_session_conf *conf,
2584                            void *sess)
2585 {
2586         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2587         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2588         struct rte_crypto_auth_xform *auth_xform = NULL;
2589         struct rte_crypto_cipher_xform *cipher_xform;
2590         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2591         struct ctxt_priv *priv;
2592         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2593         struct alginfo authdata, cipherdata;
2594         struct alginfo *p_authdata = NULL;
2595         int bufsize = -1;
2596         struct sec_flow_context *flc;
2597 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2598         int swap = true;
2599 #else
2600         int swap = false;
2601 #endif
2602
2603         PMD_INIT_FUNC_TRACE();
2604
2605         memset(session, 0, sizeof(dpaa2_sec_session));
2606
2607         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2608                                 sizeof(struct ctxt_priv) +
2609                                 sizeof(struct sec_flc_desc),
2610                                 RTE_CACHE_LINE_SIZE);
2611
2612         if (priv == NULL) {
2613                 DPAA2_SEC_ERR("No memory for priv CTXT");
2614                 return -ENOMEM;
2615         }
2616
2617         priv->fle_pool = dev_priv->fle_pool;
2618         flc = &priv->flc_desc[0].flc;
2619
2620         /* find xfrm types */
2621         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2622                 cipher_xform = &xform->cipher;
2623         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2624                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2625                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2626                 cipher_xform = &xform->cipher;
2627                 auth_xform = &xform->next->auth;
2628         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2629                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2630                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2631                 cipher_xform = &xform->next->cipher;
2632                 auth_xform = &xform->auth;
2633         } else {
2634                 DPAA2_SEC_ERR("Invalid crypto type");
2635                 return -EINVAL;
2636         }
2637
2638         session->ctxt_type = DPAA2_SEC_PDCP;
2639         if (cipher_xform) {
2640                 session->cipher_key.data = rte_zmalloc(NULL,
2641                                                cipher_xform->key.length,
2642                                                RTE_CACHE_LINE_SIZE);
2643                 if (session->cipher_key.data == NULL &&
2644                                 cipher_xform->key.length > 0) {
2645                         DPAA2_SEC_ERR("No Memory for cipher key");
2646                         rte_free(priv);
2647                         return -ENOMEM;
2648                 }
2649                 session->cipher_key.length = cipher_xform->key.length;
2650                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2651                         cipher_xform->key.length);
2652                 session->dir =
2653                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2654                                         DIR_ENC : DIR_DEC;
2655                 session->cipher_alg = cipher_xform->algo;
2656         } else {
2657                 session->cipher_key.data = NULL;
2658                 session->cipher_key.length = 0;
2659                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2660                 session->dir = DIR_ENC;
2661         }
2662
2663         session->pdcp.domain = pdcp_xform->domain;
2664         session->pdcp.bearer = pdcp_xform->bearer;
2665         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2666         session->pdcp.sn_size = pdcp_xform->sn_size;
2667 #ifdef ENABLE_HFN_OVERRIDE
2668         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
2669 #endif
2670         session->pdcp.hfn = pdcp_xform->hfn;
2671         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2672
2673         cipherdata.key = (size_t)session->cipher_key.data;
2674         cipherdata.keylen = session->cipher_key.length;
2675         cipherdata.key_enc_flags = 0;
2676         cipherdata.key_type = RTA_DATA_IMM;
2677
2678         switch (session->cipher_alg) {
2679         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2680                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
2681                 break;
2682         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2683                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
2684                 break;
2685         case RTE_CRYPTO_CIPHER_AES_CTR:
2686                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
2687                 break;
2688         case RTE_CRYPTO_CIPHER_NULL:
2689                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
2690                 break;
2691         default:
2692                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2693                               session->cipher_alg);
2694                 goto out;
2695         }
2696
2697         if (auth_xform) {
2698                 session->auth_key.data = rte_zmalloc(NULL,
2699                                                      auth_xform->key.length,
2700                                                      RTE_CACHE_LINE_SIZE);
2701                 if (!session->auth_key.data &&
2702                     auth_xform->key.length > 0) {
2703                         DPAA2_SEC_ERR("No Memory for auth key");
2704                         rte_free(session->cipher_key.data);
2705                         rte_free(priv);
2706                         return -ENOMEM;
2707                 }
2708                 session->auth_key.length = auth_xform->key.length;
2709                 memcpy(session->auth_key.data, auth_xform->key.data,
2710                        auth_xform->key.length);
2711                 session->auth_alg = auth_xform->algo;
2712         } else {
2713                 session->auth_key.data = NULL;
2714                 session->auth_key.length = 0;
2715                 session->auth_alg = 0;
2716         }
2717         authdata.key = (size_t)session->auth_key.data;
2718         authdata.keylen = session->auth_key.length;
2719         authdata.key_enc_flags = 0;
2720         authdata.key_type = RTA_DATA_IMM;
2721
2722         if (session->auth_alg) {
2723                 switch (session->auth_alg) {
2724                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2725                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
2726                         break;
2727                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2728                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
2729                         break;
2730                 case RTE_CRYPTO_AUTH_AES_CMAC:
2731                         authdata.algtype = PDCP_AUTH_TYPE_AES;
2732                         break;
2733                 case RTE_CRYPTO_AUTH_NULL:
2734                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
2735                         break;
2736                 default:
2737                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2738                                       session->auth_alg);
2739                         goto out;
2740                 }
2741
2742                 p_authdata = &authdata;
2743         } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2744                 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
2745                 goto out;
2746         }
2747
2748         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2749                 if (session->dir == DIR_ENC)
2750                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
2751                                         priv->flc_desc[0].desc, 1, swap,
2752                                         pdcp_xform->hfn,
2753                                         session->pdcp.sn_size,
2754                                         pdcp_xform->bearer,
2755                                         pdcp_xform->pkt_dir,
2756                                         pdcp_xform->hfn_threshold,
2757                                         &cipherdata, &authdata,
2758                                         0);
2759                 else if (session->dir == DIR_DEC)
2760                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
2761                                         priv->flc_desc[0].desc, 1, swap,
2762                                         pdcp_xform->hfn,
2763                                         session->pdcp.sn_size,
2764                                         pdcp_xform->bearer,
2765                                         pdcp_xform->pkt_dir,
2766                                         pdcp_xform->hfn_threshold,
2767                                         &cipherdata, &authdata,
2768                                         0);
2769         } else {
2770                 if (session->dir == DIR_ENC)
2771                         bufsize = cnstr_shdsc_pdcp_u_plane_encap(
2772                                         priv->flc_desc[0].desc, 1, swap,
2773                                         session->pdcp.sn_size,
2774                                         pdcp_xform->hfn,
2775                                         pdcp_xform->bearer,
2776                                         pdcp_xform->pkt_dir,
2777                                         pdcp_xform->hfn_threshold,
2778                                         &cipherdata, p_authdata, 0);
2779                 else if (session->dir == DIR_DEC)
2780                         bufsize = cnstr_shdsc_pdcp_u_plane_decap(
2781                                         priv->flc_desc[0].desc, 1, swap,
2782                                         session->pdcp.sn_size,
2783                                         pdcp_xform->hfn,
2784                                         pdcp_xform->bearer,
2785                                         pdcp_xform->pkt_dir,
2786                                         pdcp_xform->hfn_threshold,
2787                                         &cipherdata, p_authdata, 0);
2788         }
2789
2790         if (bufsize < 0) {
2791                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2792                 goto out;
2793         }
2794
2795         /* Enable the stashing control bit */
2796         DPAA2_SET_FLC_RSC(flc);
2797         flc->word2_rflc_31_0 = lower_32_bits(
2798                         (size_t)&(((struct dpaa2_sec_qp *)
2799                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
2800         flc->word3_rflc_63_32 = upper_32_bits(
2801                         (size_t)&(((struct dpaa2_sec_qp *)
2802                         dev->data->queue_pairs[0])->rx_vq));
2803
2804         flc->word1_sdl = (uint8_t)bufsize;
2805
2806         /* Set EWS bit i.e. enable write-safe */
2807         DPAA2_SET_FLC_EWS(flc);
2808         /* Set BS = 1 i.e reuse input buffers as output buffers */
2809         DPAA2_SET_FLC_REUSE_BS(flc);
2810         /* Set FF = 10; reuse input buffers if they provide sufficient space */
2811         DPAA2_SET_FLC_REUSE_FF(flc);
2812
2813         session->ctxt = priv;
2814
2815         return 0;
2816 out:
2817         rte_free(session->auth_key.data);
2818         rte_free(session->cipher_key.data);
2819         rte_free(priv);
2820         return -1;
2821 }
2822
2823 static int
2824 dpaa2_sec_security_session_create(void *dev,
2825                                   struct rte_security_session_conf *conf,
2826                                   struct rte_security_session *sess,
2827                                   struct rte_mempool *mempool)
2828 {
2829         void *sess_private_data;
2830         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2831         int ret;
2832
2833         if (rte_mempool_get(mempool, &sess_private_data)) {
2834                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2835                 return -ENOMEM;
2836         }
2837
2838         switch (conf->protocol) {
2839         case RTE_SECURITY_PROTOCOL_IPSEC:
2840                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2841                                 sess_private_data);
2842                 break;
2843         case RTE_SECURITY_PROTOCOL_MACSEC:
2844                 return -ENOTSUP;
2845         case RTE_SECURITY_PROTOCOL_PDCP:
2846                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
2847                                 sess_private_data);
2848                 break;
2849         default:
2850                 return -EINVAL;
2851         }
2852         if (ret != 0) {
2853                 DPAA2_SEC_ERR("Failed to configure session parameters");
2854                 /* Return session to mempool */
2855                 rte_mempool_put(mempool, sess_private_data);
2856                 return ret;
2857         }
2858
2859         set_sec_session_private_data(sess, sess_private_data);
2860
2861         return ret;
2862 }
2863
2864 /** Clear the memory of session so it doesn't leave key material behind */
2865 static int
2866 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2867                 struct rte_security_session *sess)
2868 {
2869         PMD_INIT_FUNC_TRACE();
2870         void *sess_priv = get_sec_session_private_data(sess);
2871
2872         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2873
2874         if (sess_priv) {
2875                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2876
2877                 rte_free(s->ctxt);
2878                 rte_free(s->cipher_key.data);
2879                 rte_free(s->auth_key.data);
2880                 memset(s, 0, sizeof(dpaa2_sec_session));
2881                 set_sec_session_private_data(sess, NULL);
2882                 rte_mempool_put(sess_mp, sess_priv);
2883         }
2884         return 0;
2885 }
2886
2887 static int
2888 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2889                 struct rte_crypto_sym_xform *xform,
2890                 struct rte_cryptodev_sym_session *sess,
2891                 struct rte_mempool *mempool)
2892 {
2893         void *sess_private_data;
2894         int ret;
2895
2896         if (rte_mempool_get(mempool, &sess_private_data)) {
2897                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
2898                 return -ENOMEM;
2899         }
2900
2901         ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2902         if (ret != 0) {
2903                 DPAA2_SEC_ERR("Failed to configure session parameters");
2904                 /* Return session to mempool */
2905                 rte_mempool_put(mempool, sess_private_data);
2906                 return ret;
2907         }
2908
2909         set_sym_session_private_data(sess, dev->driver_id,
2910                 sess_private_data);
2911
2912         return 0;
2913 }
2914
2915 /** Clear the memory of session so it doesn't leave key material behind */
2916 static void
2917 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2918                 struct rte_cryptodev_sym_session *sess)
2919 {
2920         PMD_INIT_FUNC_TRACE();
2921         uint8_t index = dev->driver_id;
2922         void *sess_priv = get_sym_session_private_data(sess, index);
2923         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2924
2925         if (sess_priv) {
2926                 rte_free(s->ctxt);
2927                 rte_free(s->cipher_key.data);
2928                 rte_free(s->auth_key.data);
2929                 memset(s, 0, sizeof(dpaa2_sec_session));
2930                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2931                 set_sym_session_private_data(sess, index, NULL);
2932                 rte_mempool_put(sess_mp, sess_priv);
2933         }
2934 }
2935
/* dev_configure op: intentionally a no-op — this PMD needs no
 * device-level configuration; all setup happens per queue pair and per
 * session.  Always reports success.
 */
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
2944
/* dev_start op: enable the DPSECI object via the MC, read back its
 * attributes and record the hardware FQIDs of every configured rx/tx
 * queue in the corresponding queue-pair structures.
 *
 * Returns 0 on success; on failure the DPSECI is disabled again and
 * -1 is returned.
 */
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
	/* Walk only queue pairs that have been set up (qp[i] != NULL).
	 * NOTE(review): dpseci_get_rx_queue/get_tx_queue return values are
	 * ignored here; a failure would leave fqid stale — confirm whether
	 * these MC calls can fail after a successful get_attributes.
	 */
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	/* Disable is also issued when enable itself failed; harmless. */
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
2993
2994 static void
2995 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2996 {
2997         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2998         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2999         int ret;
3000
3001         PMD_INIT_FUNC_TRACE();
3002
3003         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3004         if (ret) {
3005                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3006                              priv->hw_id);
3007                 return;
3008         }
3009
3010         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3011         if (ret < 0) {
3012                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3013                 return;
3014         }
3015 }
3016
3017 static int
3018 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3019 {
3020         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3021         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3022         int ret;
3023
3024         PMD_INIT_FUNC_TRACE();
3025
3026         /* Function is reverse of dpaa2_sec_dev_init.
3027          * It does the following:
3028          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
3029          * 2. Close the DPSECI device
3030          * 3. Free the allocated resources.
3031          */
3032
3033         /*Close the device at underlying layer*/
3034         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3035         if (ret) {
3036                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3037                 return -1;
3038         }
3039
3040         /*Free the allocated memory for ethernet private data and dpseci*/
3041         priv->hw = NULL;
3042         rte_free(dpseci);
3043
3044         return 0;
3045 }
3046
3047 static void
3048 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3049                         struct rte_cryptodev_info *info)
3050 {
3051         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3052
3053         PMD_INIT_FUNC_TRACE();
3054         if (info != NULL) {
3055                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3056                 info->feature_flags = dev->feature_flags;
3057                 info->capabilities = dpaa2_sec_capabilities;
3058                 /* No limit of number of sessions */
3059                 info->sym.max_nb_sessions = 0;
3060                 info->driver_id = cryptodev_driver_id;
3061         }
3062 }
3063
/* Collect statistics (rte_cryptodev_ops::stats_get).
 *
 * Aggregates the per-queue-pair software counters into @stats, then
 * reads the DPSECI hardware SEC counters and dumps them to the log
 * (they are informational only and not returned to the caller).
 */
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	/* Sum software counters over every initialised queue pair. */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	/* Hardware counters: read via MC and log; a failure here does
	 * not affect the software counters already filled in.
	 */
	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
			    "\n\tNum of Requests Dequeued = %" PRIu64
			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
3114
3115 static
3116 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3117 {
3118         int i;
3119         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3120                                    (dev->data->queue_pairs);
3121
3122         PMD_INIT_FUNC_TRACE();
3123
3124         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3125                 if (qp[i] == NULL) {
3126                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3127                         continue;
3128                 }
3129                 qp[i]->tx_vq.rx_pkts = 0;
3130                 qp[i]->tx_vq.tx_pkts = 0;
3131                 qp[i]->tx_vq.err_pkts = 0;
3132                 qp[i]->rx_vq.rx_pkts = 0;
3133                 qp[i]->rx_vq.tx_pkts = 0;
3134                 qp[i]->rx_vq.err_pkts = 0;
3135         }
3136 }
3137
/* Event dequeue callback for SEC Rx queues attached with
 * RTE_SCHED_TYPE_PARALLEL: translates a dequeued frame descriptor into
 * an rte_event using the template cached in the queue at attach time,
 * then consumes the DQRR entry immediately (no ordering to preserve).
 */
static void __attribute__((hot))
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Fill the event from the template stored by eventq_attach(). */
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	ev->event_ptr = sec_fd_to_mbuf(fd);

	/* Parallel scheduling: the DQRR entry can be released now. */
	qbman_swp_dqrr_consume(swp, dq);
}
3163 static void
3164 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
3165                                  const struct qbman_fd *fd,
3166                                  const struct qbman_result *dq,
3167                                  struct dpaa2_queue *rxq,
3168                                  struct rte_event *ev)
3169 {
3170         uint8_t dqrr_index;
3171         struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3172         /* Prefetching mbuf */
3173         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3174                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3175
3176         /* Prefetching ipsec crypto_op stored in priv data of mbuf */
3177         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3178
3179         ev->flow_id = rxq->ev.flow_id;
3180         ev->sub_event_type = rxq->ev.sub_event_type;
3181         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3182         ev->op = RTE_EVENT_OP_NEW;
3183         ev->sched_type = rxq->ev.sched_type;
3184         ev->queue_id = rxq->ev.queue_id;
3185         ev->priority = rxq->ev.priority;
3186
3187         ev->event_ptr = sec_fd_to_mbuf(fd);
3188         dqrr_index = qbman_get_dqrr_idx(dq);
3189         crypto_op->sym->m_src->seqn = dqrr_index + 1;
3190         DPAA2_PER_LCORE_DQRR_SIZE++;
3191         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3192         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3193 }
3194
3195 int
3196 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3197                 int qp_id,
3198                 uint16_t dpcon_id,
3199                 const struct rte_event *event)
3200 {
3201         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3202         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3203         struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3204         struct dpseci_rx_queue_cfg cfg;
3205         int ret;
3206
3207         if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3208                 qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3209         else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3210                 qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3211         else
3212                 return -EINVAL;
3213
3214         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3215         cfg.options = DPSECI_QUEUE_OPT_DEST;
3216         cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3217         cfg.dest_cfg.dest_id = dpcon_id;
3218         cfg.dest_cfg.priority = event->priority;
3219
3220         cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3221         cfg.user_ctx = (size_t)(qp);
3222         if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3223                 cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3224                 cfg.order_preservation_en = 1;
3225         }
3226         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3227                                   qp_id, &cfg);
3228         if (ret) {
3229                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3230                 return ret;
3231         }
3232
3233         memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3234
3235         return 0;
3236 }
3237
3238 int
3239 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3240                         int qp_id)
3241 {
3242         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3243         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3244         struct dpseci_rx_queue_cfg cfg;
3245         int ret;
3246
3247         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3248         cfg.options = DPSECI_QUEUE_OPT_DEST;
3249         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3250
3251         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3252                                   qp_id, &cfg);
3253         if (ret)
3254                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
3255
3256         return ret;
3257 }
3258
/* Cryptodev control-path operations exposed by the DPAA2 SEC PMD. */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
};
3274
/* rte_security callback: return the static table of security protocol
 * capabilities supported by this PMD.
 */
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}
3280
/* rte_security operations. Only session create/destroy and
 * capabilities_get are implemented; the remaining callbacks are
 * intentionally NULL (unsupported).
 */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
3289
3290 static int
3291 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3292 {
3293         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3294
3295         rte_free(dev->security_ctx);
3296
3297         rte_mempool_free(internals->fle_pool);
3298
3299         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3300                        dev->data->name, rte_socket_id());
3301
3302         return 0;
3303 }
3304
3305 static int
3306 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3307 {
3308         struct dpaa2_sec_dev_private *internals;
3309         struct rte_device *dev = cryptodev->device;
3310         struct rte_dpaa2_device *dpaa2_dev;
3311         struct rte_security_ctx *security_instance;
3312         struct fsl_mc_io *dpseci;
3313         uint16_t token;
3314         struct dpseci_attr attr;
3315         int retcode, hw_id;
3316         char str[30];
3317
3318         PMD_INIT_FUNC_TRACE();
3319         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3320         if (dpaa2_dev == NULL) {
3321                 DPAA2_SEC_ERR("DPAA2 SEC device not found");
3322                 return -1;
3323         }
3324         hw_id = dpaa2_dev->object_id;
3325
3326         cryptodev->driver_id = cryptodev_driver_id;
3327         cryptodev->dev_ops = &crypto_ops;
3328
3329         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3330         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3331         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3332                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3333                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3334                         RTE_CRYPTODEV_FF_SECURITY |
3335                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3336                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3337                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3338                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3339                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3340
3341         internals = cryptodev->data->dev_private;
3342
3343         /*
3344          * For secondary processes, we don't initialise any further as primary
3345          * has already done this work. Only check we don't need a different
3346          * RX function
3347          */
3348         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3349                 DPAA2_SEC_DEBUG("Device already init by primary process");
3350                 return 0;
3351         }
3352
3353         /* Initialize security_ctx only for primary process*/
3354         security_instance = rte_malloc("rte_security_instances_ops",
3355                                 sizeof(struct rte_security_ctx), 0);
3356         if (security_instance == NULL)
3357                 return -ENOMEM;
3358         security_instance->device = (void *)cryptodev;
3359         security_instance->ops = &dpaa2_sec_security_ops;
3360         security_instance->sess_cnt = 0;
3361         cryptodev->security_ctx = security_instance;
3362
3363         /*Open the rte device via MC and save the handle for further use*/
3364         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3365                                 sizeof(struct fsl_mc_io), 0);
3366         if (!dpseci) {
3367                 DPAA2_SEC_ERR(
3368                         "Error in allocating the memory for dpsec object");
3369                 return -1;
3370         }
3371         dpseci->regs = rte_mcp_ptr_list[0];
3372
3373         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3374         if (retcode != 0) {
3375                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3376                               retcode);
3377                 goto init_error;
3378         }
3379         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3380         if (retcode != 0) {
3381                 DPAA2_SEC_ERR(
3382                              "Cannot get dpsec device attributed: Error = %x",
3383                              retcode);
3384                 goto init_error;
3385         }
3386         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3387                         "dpsec-%u", hw_id);
3388
3389         internals->max_nb_queue_pairs = attr.num_tx_queues;
3390         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3391         internals->hw = dpseci;
3392         internals->token = token;
3393
3394         snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3395                         getpid(), cryptodev->data->dev_id);
3396         internals->fle_pool = rte_mempool_create((const char *)str,
3397                         FLE_POOL_NUM_BUFS,
3398                         FLE_POOL_BUF_SIZE,
3399                         FLE_POOL_CACHE_SIZE, 0,
3400                         NULL, NULL, NULL, NULL,
3401                         SOCKET_ID_ANY, 0);
3402         if (!internals->fle_pool) {
3403                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3404                 goto init_error;
3405         }
3406
3407         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3408         return 0;
3409
3410 init_error:
3411         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3412
3413         /* dpaa2_sec_uninit(crypto_dev_name); */
3414         return -EFAULT;
3415 }
3416
3417 static int
3418 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3419                           struct rte_dpaa2_device *dpaa2_dev)
3420 {
3421         struct rte_cryptodev *cryptodev;
3422         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3423
3424         int retval;
3425
3426         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3427                         dpaa2_dev->object_id);
3428
3429         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3430         if (cryptodev == NULL)
3431                 return -ENOMEM;
3432
3433         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3434                 cryptodev->data->dev_private = rte_zmalloc_socket(
3435                                         "cryptodev private structure",
3436                                         sizeof(struct dpaa2_sec_dev_private),
3437                                         RTE_CACHE_LINE_SIZE,
3438                                         rte_socket_id());
3439
3440                 if (cryptodev->data->dev_private == NULL)
3441                         rte_panic("Cannot allocate memzone for private "
3442                                   "device data");
3443         }
3444
3445         dpaa2_dev->cryptodev = cryptodev;
3446         cryptodev->device = &dpaa2_dev->device;
3447
3448         /* init user callbacks */
3449         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3450
3451         /* Invoke PMD device initialization function */
3452         retval = dpaa2_sec_dev_init(cryptodev);
3453         if (retval == 0)
3454                 return 0;
3455
3456         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3457                 rte_free(cryptodev->data->dev_private);
3458
3459         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3460
3461         return -ENXIO;
3462 }
3463
3464 static int
3465 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
3466 {
3467         struct rte_cryptodev *cryptodev;
3468         int ret;
3469
3470         cryptodev = dpaa2_dev->cryptodev;
3471         if (cryptodev == NULL)
3472                 return -ENODEV;
3473
3474         ret = dpaa2_sec_uninit(cryptodev);
3475         if (ret)
3476                 return ret;
3477
3478         return rte_cryptodev_pmd_destroy(cryptodev);
3479 }
3480
/* fsl-mc bus driver descriptor: matches DPAA2_CRYPTO (DPSECI) objects;
 * RTE_DPAA2_DRV_IOVA_AS_VA indicates this driver uses IOVA as VA.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};
3490
/* Handle used to register this PMD with the crypto driver framework. */
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register with the fsl-mc bus and allocate the crypto driver id. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
3496
/* Constructor: register the PMD log type and default it to NOTICE. */
RTE_INIT(dpaa2_sec_init_log)
{
	/* Driver-level log type for this crypto PMD. */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}