crypto/dpaa2_sec: create fle pool per queue pair
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2022 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 #include <rte_hexdump.h>
32
33 #include "dpaa2_sec_priv.h"
34 #include "dpaa2_sec_event.h"
35 #include "dpaa2_sec_logs.h"
36
37 /* RTA header files */
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41 #include <desc/algo.h>
42
/* Minimum job descriptor consists of a oneword job descriptor HEADER and
 * a pointer to the shared descriptor
 */
#define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0

/* devargs keys accepted by this driver at probe time */
#define DRIVER_DUMP_MODE "drv_dump_mode"
#define DRIVER_STRICT_ORDER "drv_strict_order"

/* DPAA2_SEC_DP_DUMP levels */
enum dpaa2_sec_dump_levels {
        DPAA2_SEC_DP_NO_DUMP,   /* never dump datapath frame descriptors */
        DPAA2_SEC_DP_ERR_DUMP,  /* dump frame descriptors on error only */
        DPAA2_SEC_DP_FULL_DUMP  /* dump frame descriptors unconditionally */
};

uint8_t cryptodev_driver_id;
/* datapath dump verbosity; default is error-only (see DRIVER_DUMP_MODE) */
uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
66
67 static inline void
68 free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
69 {
70         struct qbman_fle *fle;
71         struct rte_crypto_op *op;
72
73 #ifdef RTE_LIB_SECURITY
74         if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
75                 return;
76 #endif
77         fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
78         op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
79         /* free the fle memory */
80         if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
81                 rte_mempool_put(qp->fle_pool, (void *)(fle-1));
82         else
83                 rte_free((void *)(fle-1));
84 }
85
86 #ifdef RTE_LIB_SECURITY
87 static inline int
88 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
89                            struct rte_crypto_op *op,
90                            struct qbman_fd *fd, uint16_t bpid)
91 {
92         struct rte_crypto_sym_op *sym_op = op->sym;
93         struct ctxt_priv *priv = sess->ctxt;
94         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
95         struct sec_flow_context *flc;
96         struct rte_mbuf *mbuf;
97         uint32_t in_len = 0, out_len = 0;
98
99         if (sym_op->m_dst)
100                 mbuf = sym_op->m_dst;
101         else
102                 mbuf = sym_op->m_src;
103
104         /* first FLE entry used to store mbuf and session ctxt */
105         fle = (struct qbman_fle *)rte_malloc(NULL,
106                         FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
107                         RTE_CACHE_LINE_SIZE);
108         if (unlikely(!fle)) {
109                 DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
110                 return -ENOMEM;
111         }
112         memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
113         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
114         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
115
116         /* Save the shared descriptor */
117         flc = &priv->flc_desc[0].flc;
118
119         op_fle = fle + 1;
120         ip_fle = fle + 2;
121         sge = fle + 3;
122
123         if (likely(bpid < MAX_BPID)) {
124                 DPAA2_SET_FD_BPID(fd, bpid);
125                 DPAA2_SET_FLE_BPID(op_fle, bpid);
126                 DPAA2_SET_FLE_BPID(ip_fle, bpid);
127         } else {
128                 DPAA2_SET_FD_IVP(fd);
129                 DPAA2_SET_FLE_IVP(op_fle);
130                 DPAA2_SET_FLE_IVP(ip_fle);
131         }
132
133         /* Configure FD as a FRAME LIST */
134         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
135         DPAA2_SET_FD_COMPOUND_FMT(fd);
136         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
137
138         /* Configure Output FLE with Scatter/Gather Entry */
139         DPAA2_SET_FLE_SG_EXT(op_fle);
140         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
141
142         /* Configure Output SGE for Encap/Decap */
143         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
144         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
145         /* o/p segs */
146         while (mbuf->next) {
147                 sge->length = mbuf->data_len;
148                 out_len += sge->length;
149                 sge++;
150                 mbuf = mbuf->next;
151                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153         }
154         /* using buf_len for last buf - so that extra data can be added */
155         sge->length = mbuf->buf_len - mbuf->data_off;
156         out_len += sge->length;
157
158         DPAA2_SET_FLE_FIN(sge);
159         op_fle->length = out_len;
160
161         sge++;
162         mbuf = sym_op->m_src;
163
164         /* Configure Input FLE with Scatter/Gather Entry */
165         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
166         DPAA2_SET_FLE_SG_EXT(ip_fle);
167         DPAA2_SET_FLE_FIN(ip_fle);
168
169         /* Configure input SGE for Encap/Decap */
170         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
171         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
172         sge->length = mbuf->data_len;
173         in_len += sge->length;
174
175         mbuf = mbuf->next;
176         /* i/p segs */
177         while (mbuf) {
178                 sge++;
179                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
180                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
181                 sge->length = mbuf->data_len;
182                 in_len += sge->length;
183                 mbuf = mbuf->next;
184         }
185         ip_fle->length = in_len;
186         DPAA2_SET_FLE_FIN(sge);
187
188         /* In case of PDCP, per packet HFN is stored in
189          * mbuf priv after sym_op.
190          */
191         if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
192                 uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
193                                         sess->pdcp.hfn_ovd_offset);
194                 /*enable HFN override override */
195                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
196                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
197                 DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
198         }
199         DPAA2_SET_FD_LEN(fd, ip_fle->length);
200
201         return 0;
202 }
203
/* Build a compound FD for a protocol-offload op on contiguous mbufs.
 *
 * Uses a fixed three-entry FLE table drawn from the per-queue-pair
 * fle_pool: fle[0] stores the op pointer + session ctxt for completion
 * processing, fle[1] is the output entry, fle[2] the input entry.
 *
 * @param sess  session providing the shared descriptor (flc)
 * @param op    crypto operation; m_dst (if set) receives the output
 * @param fd    frame descriptor to populate
 * @param bpid  buffer-pool id, or >= MAX_BPID for SW-owned buffers
 * @param qp    queue pair owning the fle_pool
 * @return 0 on success, -ENOMEM if the fle_pool is exhausted
 */
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	/* in-place operation when no separate destination is given */
	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		/* invalid bpid: buffers are SW-owned, not HW-pool backed */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data.
	 * buf_len (not data_len) so SEC can append trailer data.
	 */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/*enable HFN override override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;

}
279
/* Build a single-buffer FD for an in-place protocol-offload op.
 *
 * Out-of-place requests (m_dst set) are delegated to
 * build_proto_compound_fd(); otherwise the mbuf itself becomes the FD
 * buffer and no FLE table is allocated.
 *
 * NOTE: the op pointer is stashed in mbuf->buf_iova for retrieval at
 * dequeue time, and the original buf_iova is parked in
 * aead.digest.phys_addr; the dequeue path is expected to restore it.
 *
 * @return 0 on success, or build_proto_compound_fd()'s error code
 */
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid, qp);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
312 #endif
313
314 static inline int
315 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
316                  struct rte_crypto_op *op,
317                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
318 {
319         struct rte_crypto_sym_op *sym_op = op->sym;
320         struct ctxt_priv *priv = sess->ctxt;
321         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
322         struct sec_flow_context *flc;
323         uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
324         int icv_len = sess->digest_length;
325         uint8_t *old_icv;
326         struct rte_mbuf *mbuf;
327         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
328                         sess->iv.offset);
329
330         if (sym_op->m_dst)
331                 mbuf = sym_op->m_dst;
332         else
333                 mbuf = sym_op->m_src;
334
335         /* first FLE entry used to store mbuf and session ctxt */
336         fle = (struct qbman_fle *)rte_malloc(NULL,
337                         FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
338                         RTE_CACHE_LINE_SIZE);
339         if (unlikely(!fle)) {
340                 DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
341                 return -ENOMEM;
342         }
343         memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
344         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
345         DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
346
347         op_fle = fle + 1;
348         ip_fle = fle + 2;
349         sge = fle + 3;
350
351         /* Save the shared descriptor */
352         flc = &priv->flc_desc[0].flc;
353
354         /* Configure FD as a FRAME LIST */
355         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
356         DPAA2_SET_FD_COMPOUND_FMT(fd);
357         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
358
359         DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
360                    "iv-len=%d data_off: 0x%x\n",
361                    sym_op->aead.data.offset,
362                    sym_op->aead.data.length,
363                    sess->digest_length,
364                    sess->iv.length,
365                    sym_op->m_src->data_off);
366
367         /* Configure Output FLE with Scatter/Gather Entry */
368         DPAA2_SET_FLE_SG_EXT(op_fle);
369         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
370
371         if (auth_only_len)
372                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
373
374         op_fle->length = (sess->dir == DIR_ENC) ?
375                         (sym_op->aead.data.length + icv_len) :
376                         sym_op->aead.data.length;
377
378         /* Configure Output SGE for Encap/Decap */
379         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
380         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
381         sge->length = mbuf->data_len - sym_op->aead.data.offset;
382
383         mbuf = mbuf->next;
384         /* o/p segs */
385         while (mbuf) {
386                 sge++;
387                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
388                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
389                 sge->length = mbuf->data_len;
390                 mbuf = mbuf->next;
391         }
392         sge->length -= icv_len;
393
394         if (sess->dir == DIR_ENC) {
395                 sge++;
396                 DPAA2_SET_FLE_ADDR(sge,
397                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
398                 sge->length = icv_len;
399         }
400         DPAA2_SET_FLE_FIN(sge);
401
402         sge++;
403         mbuf = sym_op->m_src;
404
405         /* Configure Input FLE with Scatter/Gather Entry */
406         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
407         DPAA2_SET_FLE_SG_EXT(ip_fle);
408         DPAA2_SET_FLE_FIN(ip_fle);
409         ip_fle->length = (sess->dir == DIR_ENC) ?
410                 (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
411                 (sym_op->aead.data.length + sess->iv.length + auth_only_len +
412                  icv_len);
413
414         /* Configure Input SGE for Encap/Decap */
415         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
416         sge->length = sess->iv.length;
417
418         sge++;
419         if (auth_only_len) {
420                 DPAA2_SET_FLE_ADDR(sge,
421                                 DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
422                 sge->length = auth_only_len;
423                 sge++;
424         }
425
426         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
427         DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
428                                 mbuf->data_off);
429         sge->length = mbuf->data_len - sym_op->aead.data.offset;
430
431         mbuf = mbuf->next;
432         /* i/p segs */
433         while (mbuf) {
434                 sge++;
435                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
436                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
437                 sge->length = mbuf->data_len;
438                 mbuf = mbuf->next;
439         }
440
441         if (sess->dir == DIR_DEC) {
442                 sge++;
443                 old_icv = (uint8_t *)(sge + 1);
444                 memcpy(old_icv, sym_op->aead.digest.data, icv_len);
445                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
446                 sge->length = icv_len;
447         }
448
449         DPAA2_SET_FLE_FIN(sge);
450         if (auth_only_len) {
451                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
452                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
453         }
454         DPAA2_SET_FD_LEN(fd, ip_fle->length);
455
456         return 0;
457 }
458
/* Build a compound FD for an AEAD (GCM) op on contiguous mbufs.
 *
 * Uses a fixed FLE table from the per-queue-pair fle_pool. fle[0] holds
 * the op pointer + session ctxt; the `fle` variable is then advanced and
 * reused as the output FLE (fle+1) and later the input FLE (fle+2), with
 * `sge` walking the SG entries that follow.
 *
 * @param sess  session providing keys/IV/digest params and the flc
 * @param op    crypto operation; m_dst (if set) receives the output
 * @param fd    frame descriptor to populate
 * @param bpid  buffer-pool id, or >= MAX_BPID for SW-owned buffers
 * @param qp    queue pair owning the fle_pool
 * @return 0 on success, -ENOMEM if the fle_pool is exhausted
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid,
		     struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	/* Out-of-place op writes into m_dst; otherwise operate in place. */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* invalid bpid: buffers are SW-owned, not HW-pool backed */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* encrypt output additionally carries the ICV */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		/* on encrypt, digest goes to its own SGE */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		/* on decrypt, copy the received ICV so SEC can verify it */
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}
599
600 static inline int
601 build_authenc_sg_fd(dpaa2_sec_session *sess,
602                  struct rte_crypto_op *op,
603                  struct qbman_fd *fd, __rte_unused uint16_t bpid)
604 {
605         struct rte_crypto_sym_op *sym_op = op->sym;
606         struct ctxt_priv *priv = sess->ctxt;
607         struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
608         struct sec_flow_context *flc;
609         uint16_t auth_hdr_len = sym_op->cipher.data.offset -
610                                 sym_op->auth.data.offset;
611         uint16_t auth_tail_len = sym_op->auth.data.length -
612                                 sym_op->cipher.data.length - auth_hdr_len;
613         uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
614         int icv_len = sess->digest_length;
615         uint8_t *old_icv;
616         struct rte_mbuf *mbuf;
617         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
618                         sess->iv.offset);
619
620         if (sym_op->m_dst)
621                 mbuf = sym_op->m_dst;
622         else
623                 mbuf = sym_op->m_src;
624
625         /* first FLE entry used to store mbuf and session ctxt */
626         fle = (struct qbman_fle *)rte_malloc(NULL,
627                         FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
628                         RTE_CACHE_LINE_SIZE);
629         if (unlikely(!fle)) {
630                 DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
631                 return -ENOMEM;
632         }
633         memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
634         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
635         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
636
637         op_fle = fle + 1;
638         ip_fle = fle + 2;
639         sge = fle + 3;
640
641         /* Save the shared descriptor */
642         flc = &priv->flc_desc[0].flc;
643
644         /* Configure FD as a FRAME LIST */
645         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
646         DPAA2_SET_FD_COMPOUND_FMT(fd);
647         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
648
649         DPAA2_SEC_DP_DEBUG(
650                 "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
651                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
652                 sym_op->auth.data.offset,
653                 sym_op->auth.data.length,
654                 sess->digest_length,
655                 sym_op->cipher.data.offset,
656                 sym_op->cipher.data.length,
657                 sess->iv.length,
658                 sym_op->m_src->data_off);
659
660         /* Configure Output FLE with Scatter/Gather Entry */
661         DPAA2_SET_FLE_SG_EXT(op_fle);
662         DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
663
664         if (auth_only_len)
665                 DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
666
667         op_fle->length = (sess->dir == DIR_ENC) ?
668                         (sym_op->cipher.data.length + icv_len) :
669                         sym_op->cipher.data.length;
670
671         /* Configure Output SGE for Encap/Decap */
672         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
673         DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
674         sge->length = mbuf->data_len - sym_op->auth.data.offset;
675
676         mbuf = mbuf->next;
677         /* o/p segs */
678         while (mbuf) {
679                 sge++;
680                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
681                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
682                 sge->length = mbuf->data_len;
683                 mbuf = mbuf->next;
684         }
685         sge->length -= icv_len;
686
687         if (sess->dir == DIR_ENC) {
688                 sge++;
689                 DPAA2_SET_FLE_ADDR(sge,
690                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
691                 sge->length = icv_len;
692         }
693         DPAA2_SET_FLE_FIN(sge);
694
695         sge++;
696         mbuf = sym_op->m_src;
697
698         /* Configure Input FLE with Scatter/Gather Entry */
699         DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
700         DPAA2_SET_FLE_SG_EXT(ip_fle);
701         DPAA2_SET_FLE_FIN(ip_fle);
702         ip_fle->length = (sess->dir == DIR_ENC) ?
703                         (sym_op->auth.data.length + sess->iv.length) :
704                         (sym_op->auth.data.length + sess->iv.length +
705                          icv_len);
706
707         /* Configure Input SGE for Encap/Decap */
708         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
709         sge->length = sess->iv.length;
710
711         sge++;
712         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
713         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
714                                 mbuf->data_off);
715         sge->length = mbuf->data_len - sym_op->auth.data.offset;
716
717         mbuf = mbuf->next;
718         /* i/p segs */
719         while (mbuf) {
720                 sge++;
721                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
722                 DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
723                 sge->length = mbuf->data_len;
724                 mbuf = mbuf->next;
725         }
726         sge->length -= icv_len;
727
728         if (sess->dir == DIR_DEC) {
729                 sge++;
730                 old_icv = (uint8_t *)(sge + 1);
731                 memcpy(old_icv, sym_op->auth.digest.data,
732                        icv_len);
733                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
734                 sge->length = icv_len;
735         }
736
737         DPAA2_SET_FLE_FIN(sge);
738         if (auth_only_len) {
739                 DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
740                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
741         }
742         DPAA2_SET_FD_LEN(fd, ip_fle->length);
743
744         return 0;
745 }
746
747 static inline int
748 build_authenc_fd(dpaa2_sec_session *sess,
749                  struct rte_crypto_op *op,
750                  struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
751 {
752         struct rte_crypto_sym_op *sym_op = op->sym;
753         struct ctxt_priv *priv = sess->ctxt;
754         struct qbman_fle *fle, *sge;
755         struct sec_flow_context *flc;
756         uint16_t auth_hdr_len = sym_op->cipher.data.offset -
757                                 sym_op->auth.data.offset;
758         uint16_t auth_tail_len = sym_op->auth.data.length -
759                                 sym_op->cipher.data.length - auth_hdr_len;
760         uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
761
762         int icv_len = sess->digest_length, retval;
763         uint8_t *old_icv;
764         uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
765                         sess->iv.offset);
766         struct rte_mbuf *dst;
767
768         if (sym_op->m_dst)
769                 dst = sym_op->m_dst;
770         else
771                 dst = sym_op->m_src;
772
773         /* we are using the first FLE entry to store Mbuf.
774          * Currently we donot know which FLE has the mbuf stored.
775          * So while retreiving we can go back 1 FLE from the FD -ADDR
776          * to get the MBUF Addr from the previous FLE.
777          * We can have a better approach to use the inline Mbuf
778          */
779         retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
780         if (retval) {
781                 DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
782                 return -ENOMEM;
783         }
784         memset(fle, 0, FLE_POOL_BUF_SIZE);
785         DPAA2_SET_FLE_ADDR(fle, (size_t)op);
786         DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
787         fle = fle + 1;
788         sge = fle + 2;
789         if (likely(bpid < MAX_BPID)) {
790                 DPAA2_SET_FD_BPID(fd, bpid);
791                 DPAA2_SET_FLE_BPID(fle, bpid);
792                 DPAA2_SET_FLE_BPID(fle + 1, bpid);
793                 DPAA2_SET_FLE_BPID(sge, bpid);
794                 DPAA2_SET_FLE_BPID(sge + 1, bpid);
795                 DPAA2_SET_FLE_BPID(sge + 2, bpid);
796                 DPAA2_SET_FLE_BPID(sge + 3, bpid);
797         } else {
798                 DPAA2_SET_FD_IVP(fd);
799                 DPAA2_SET_FLE_IVP(fle);
800                 DPAA2_SET_FLE_IVP((fle + 1));
801                 DPAA2_SET_FLE_IVP(sge);
802                 DPAA2_SET_FLE_IVP((sge + 1));
803                 DPAA2_SET_FLE_IVP((sge + 2));
804                 DPAA2_SET_FLE_IVP((sge + 3));
805         }
806
807         /* Save the shared descriptor */
808         flc = &priv->flc_desc[0].flc;
809         /* Configure FD as a FRAME LIST */
810         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
811         DPAA2_SET_FD_COMPOUND_FMT(fd);
812         DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
813
814         DPAA2_SEC_DP_DEBUG(
815                 "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
816                 "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
817                 sym_op->auth.data.offset,
818                 sym_op->auth.data.length,
819                 sess->digest_length,
820                 sym_op->cipher.data.offset,
821                 sym_op->cipher.data.length,
822                 sess->iv.length,
823                 sym_op->m_src->data_off);
824
825         /* Configure Output FLE with Scatter/Gather Entry */
826         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
827         if (auth_only_len)
828                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
829         fle->length = (sess->dir == DIR_ENC) ?
830                         (sym_op->cipher.data.length + icv_len) :
831                         sym_op->cipher.data.length;
832
833         DPAA2_SET_FLE_SG_EXT(fle);
834
835         /* Configure Output SGE for Encap/Decap */
836         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
837         DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
838                                 dst->data_off);
839         sge->length = sym_op->cipher.data.length;
840
841         if (sess->dir == DIR_ENC) {
842                 sge++;
843                 DPAA2_SET_FLE_ADDR(sge,
844                                 DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
845                 sge->length = sess->digest_length;
846                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
847                                         sess->iv.length));
848         }
849         DPAA2_SET_FLE_FIN(sge);
850
851         sge++;
852         fle++;
853
854         /* Configure Input FLE with Scatter/Gather Entry */
855         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
856         DPAA2_SET_FLE_SG_EXT(fle);
857         DPAA2_SET_FLE_FIN(fle);
858         fle->length = (sess->dir == DIR_ENC) ?
859                         (sym_op->auth.data.length + sess->iv.length) :
860                         (sym_op->auth.data.length + sess->iv.length +
861                          sess->digest_length);
862
863         /* Configure Input SGE for Encap/Decap */
864         DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
865         sge->length = sess->iv.length;
866         sge++;
867
868         DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
869         DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
870                                 sym_op->m_src->data_off);
871         sge->length = sym_op->auth.data.length;
872         if (sess->dir == DIR_DEC) {
873                 sge++;
874                 old_icv = (uint8_t *)(sge + 1);
875                 memcpy(old_icv, sym_op->auth.digest.data,
876                        sess->digest_length);
877                 DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
878                 sge->length = sess->digest_length;
879                 DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
880                                  sess->digest_length +
881                                  sess->iv.length));
882         }
883         DPAA2_SET_FLE_FIN(sge);
884         if (auth_only_len) {
885                 DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
886                 DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
887         }
888         return 0;
889 }
890
/*
 * Build a compound FD for an authentication-only (hash/MAC) operation on a
 * segmented (scatter-gather) mbuf.
 *
 * FLE table layout (rte_malloc'd here, released on dequeue):
 *   fle[0]   - bookkeeping only: stores the crypto op pointer and the
 *              session ctxt so the dequeue path can recover them
 *   fle[1]   - output FLE (op_fle): points at the digest buffer
 *   fle[2]   - input FLE (ip_fle): SG-extended, points at sge[]
 *   fle[3..] - input SG entries: optional IV, the data segments, and for
 *              verification the received digest copied into scratch space
 *
 * Returns 0 on success, -ENOTSUP for bit lengths that are not whole
 * bytes, -ENOMEM when the SG table cannot be allocated.
 */
static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* SNOW3G UIA2 / ZUC EIA3 express length and offset in bits; only
	 * whole bytes are supported, so convert to bytes for the engine.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	/* One SG table sized for every segment of the source mbuf */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle: the computed digest is written here */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		/* SNOW f9 / ZUC EIA IVs are converted to the format the
		 * engine consumes; converted lengths are fixed (12 / 8).
		 */
		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		/* whole auth region fits inside the first segment */
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case: copy the received digest into
		 * scratch space just past the SG table so the engine can
		 * compare against it.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}
1009
/*
 * Build a compound FD for an authentication-only (hash/MAC) operation on a
 * contiguous mbuf, using a fixed-size FLE buffer from the per-qp fle pool.
 *
 * Pool buffer layout:
 *   fle[0]   - bookkeeping only: crypto op pointer and session ctxt
 *   fle[1]   - output FLE: digest buffer
 *   fle[2]   - input FLE: SG-extended, points at sge[]
 *   sge[0..] - input SG entries: optional IV, the data, and for
 *              verification the received digest copied to scratch space
 *
 * Returns 0 on success, -ENOTSUP for bit lengths that are not whole
 * bytes, -ENOMEM when the fle pool is exhausted.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	/* SNOW3G UIA2 / ZUC EIA3 express length and offset in bits; only
	 * whole bytes are supported, so convert to bytes for the engine.
	 */
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag FD/FLEs with the hardware buffer pool id when the buffer
	 * came from a hw pool; otherwise mark them IVP so hardware does
	 * not try to release them.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* Output FLE: the computed digest is written here */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		/* SNOW f9 / ZUC EIA IVs are converted to the format the
		 * engine consumes; converted lengths are fixed (12 / 8).
		 */
		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		/* Digest verification: stash the received digest in scratch
		 * space past the SG entries for the engine to compare.
		 */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}
1123
/*
 * Build a compound FD for a cipher-only operation on segmented
 * (scatter-gather) mbufs. Supports out-of-place operation (m_dst set).
 *
 * FLE table layout (rte_malloc'd here, released on dequeue):
 *   fle[0]   - bookkeeping only: crypto op pointer and session ctxt
 *   fle[1]   - output FLE (op_fle): SG list over destination segments
 *   fle[2]   - input FLE (ip_fle): SG list of IV followed by source segments
 *   fle[3..] - the SG entries themselves (sized for dst + src segments)
 *
 * Returns 0 on success, -ENOTSUP for bit lengths that are not whole
 * bytes, -ENOMEM when the SG table cannot be allocated.
 */
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	/* SNOW3G UEA2 / ZUC EEA3 express length and offset in bits; only
	 * whole bytes are supported, so convert to bytes for the engine.
	 */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* Out-of-place when m_dst is supplied, else in-place on m_src */
	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle: IV entry first, then the source data segments */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}
1256
/*
 * Build a compound FD for a cipher-only operation on contiguous mbufs,
 * using a fixed-size FLE buffer from the per-qp fle pool. Supports
 * out-of-place operation (m_dst set).
 *
 * Pool buffer layout:
 *   fle[0]   - bookkeeping only: crypto op pointer and session ctxt
 *   fle[1]   - output FLE: destination data
 *   fle[2]   - input FLE: SG-extended, IV followed by the source data
 *   sge[0..] - the input SG entries
 *
 * Returns 0 on success, -ENOTSUP for bit lengths that are not whole
 * bytes, -ENOMEM when the fle pool is exhausted.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	/* SNOW3G UEA2 / ZUC EEA3 express length and offset in bits; only
	 * whole bytes are supported, so convert to bytes for the engine.
	 */
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	/* Out-of-place when m_dst is supplied, else in-place on m_src */
	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	/* Tag FD/FLEs with the hardware buffer pool id when the buffer
	 * came from a hw pool; otherwise mark them IVP so hardware does
	 * not try to release them.
	 */
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Output FLE: ciphered/deciphered data is written to dst */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	/* Input FLE: SG-extended list of IV then the source data */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
1373
1374 static inline int
1375 build_sec_fd(struct rte_crypto_op *op,
1376              struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1377 {
1378         int ret = -1;
1379         dpaa2_sec_session *sess;
1380
1381         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1382                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1383                                 op->sym->session, cryptodev_driver_id);
1384 #ifdef RTE_LIB_SECURITY
1385         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1386                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1387                                 op->sym->sec_session);
1388 #endif
1389         else {
1390                 DPAA2_SEC_DP_ERR("Session type invalid\n");
1391                 return -ENOTSUP;
1392         }
1393
1394         if (!sess) {
1395                 DPAA2_SEC_DP_ERR("Session not available\n");
1396                 return -EINVAL;
1397         }
1398
1399         /* Any of the buffer is segmented*/
1400         if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1401                   ((op->sym->m_dst != NULL) &&
1402                    !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1403                 switch (sess->ctxt_type) {
1404                 case DPAA2_SEC_CIPHER:
1405                         ret = build_cipher_sg_fd(sess, op, fd, bpid);
1406                         break;
1407                 case DPAA2_SEC_AUTH:
1408                         ret = build_auth_sg_fd(sess, op, fd, bpid);
1409                         break;
1410                 case DPAA2_SEC_AEAD:
1411                         ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1412                         break;
1413                 case DPAA2_SEC_CIPHER_HASH:
1414                         ret = build_authenc_sg_fd(sess, op, fd, bpid);
1415                         break;
1416 #ifdef RTE_LIB_SECURITY
1417                 case DPAA2_SEC_IPSEC:
1418                 case DPAA2_SEC_PDCP:
1419                         ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1420                         break;
1421 #endif
1422                 case DPAA2_SEC_HASH_CIPHER:
1423                 default:
1424                         DPAA2_SEC_ERR("error: Unsupported session");
1425                 }
1426         } else {
1427                 switch (sess->ctxt_type) {
1428                 case DPAA2_SEC_CIPHER:
1429                         ret = build_cipher_fd(sess, op, fd, bpid, qp);
1430                         break;
1431                 case DPAA2_SEC_AUTH:
1432                         ret = build_auth_fd(sess, op, fd, bpid, qp);
1433                         break;
1434                 case DPAA2_SEC_AEAD:
1435                         ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
1436                         break;
1437                 case DPAA2_SEC_CIPHER_HASH:
1438                         ret = build_authenc_fd(sess, op, fd, bpid, qp);
1439                         break;
1440 #ifdef RTE_LIB_SECURITY
1441                 case DPAA2_SEC_IPSEC:
1442                         ret = build_proto_fd(sess, op, fd, bpid, qp);
1443                         break;
1444                 case DPAA2_SEC_PDCP:
1445                         ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
1446                         break;
1447 #endif
1448                 case DPAA2_SEC_HASH_CIPHER:
1449                 default:
1450                         DPAA2_SEC_ERR("error: Unsupported session");
1451                         ret = -ENOTSUP;
1452                 }
1453         }
1454         return ret;
1455 }
1456
1457 static uint16_t
1458 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1459                         uint16_t nb_ops)
1460 {
1461         /* Function to transmit the frames to given device and VQ*/
1462         uint32_t loop;
1463         int32_t ret;
1464         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1465         uint32_t frames_to_send, retry_count;
1466         struct qbman_eq_desc eqdesc;
1467         struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1468         struct qbman_swp *swp;
1469         uint16_t num_tx = 0;
1470         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1471         /*todo - need to support multiple buffer pools */
1472         uint16_t bpid;
1473         struct rte_mempool *mb_pool;
1474
1475         if (unlikely(nb_ops == 0))
1476                 return 0;
1477
1478         if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1479                 DPAA2_SEC_ERR("sessionless crypto op not supported");
1480                 return 0;
1481         }
1482         /*Prepare enqueue descriptor*/
1483         qbman_eq_desc_clear(&eqdesc);
1484         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1485         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1486         qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1487
1488         if (!DPAA2_PER_LCORE_DPIO) {
1489                 ret = dpaa2_affine_qbman_swp();
1490                 if (ret) {
1491                         DPAA2_SEC_ERR(
1492                                 "Failed to allocate IO portal, tid: %d\n",
1493                                 rte_gettid());
1494                         return 0;
1495                 }
1496         }
1497         swp = DPAA2_PER_LCORE_PORTAL;
1498
1499         while (nb_ops) {
1500                 frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1501                         dpaa2_eqcr_size : nb_ops;
1502
1503                 for (loop = 0; loop < frames_to_send; loop++) {
1504                         if (*dpaa2_seqn((*ops)->sym->m_src)) {
1505                                 if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
1506                                         DPAA2_PER_LCORE_DQRR_SIZE--;
1507                                         DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
1508                                         *dpaa2_seqn((*ops)->sym->m_src) &
1509                                         QBMAN_EQCR_DCA_IDXMASK);
1510                                 }
1511                                 flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
1512                                 *dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
1513                         }
1514
1515                         /*Clear the unused FD fields before sending*/
1516                         memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1517                         mb_pool = (*ops)->sym->m_src->pool;
1518                         bpid = mempool_to_bpid(mb_pool);
1519                         ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1520                         if (ret) {
1521                                 DPAA2_SEC_DP_DEBUG("FD build failed\n");
1522                                 goto skip_tx;
1523                         }
1524                         ops++;
1525                 }
1526
1527                 loop = 0;
1528                 retry_count = 0;
1529                 while (loop < frames_to_send) {
1530                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1531                                                          &fd_arr[loop],
1532                                                          &flags[loop],
1533                                                          frames_to_send - loop);
1534                         if (unlikely(ret < 0)) {
1535                                 retry_count++;
1536                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1537                                         num_tx += loop;
1538                                         nb_ops -= loop;
1539                                         DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
1540                                         /* freeing the fle buffers */
1541                                         while (loop < frames_to_send) {
1542                                                 free_fle(&fd_arr[loop],
1543                                                                 dpaa2_qp);
1544                                                 loop++;
1545                                         }
1546                                         goto skip_tx;
1547                                 }
1548                         } else {
1549                                 loop += ret;
1550                                 retry_count = 0;
1551                         }
1552                 }
1553
1554                 num_tx += loop;
1555                 nb_ops -= loop;
1556         }
1557 skip_tx:
1558         dpaa2_qp->tx_vq.tx_pkts += num_tx;
1559         dpaa2_qp->tx_vq.err_pkts += nb_ops;
1560         return num_tx;
1561 }
1562
#ifdef RTE_LIB_SECURITY
/*
 * Convert a simple-format (single buffer) FD returned by SEC back into the
 * rte_crypto_op it originated from.  Only used for security-session
 * (protocol offload) traffic; the enqueue side presumably parked the op
 * pointer in mbuf->buf_iova and the real buffer IOVA in
 * aead.digest.phys_addr -- TODO confirm against the matching build path.
 *
 * Returns the recovered op, or NULL when the FD does not reference a
 * driver-recognized inline buffer.
 */
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	/* IVP set means the buffer does not come from a known buffer pool,
	 * so the inline-mbuf recovery below cannot be applied.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	/* Recover the mbuf header located just before the data buffer. */
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* SEC may have grown or shrunk the frame (headers/trailers added
	 * or stripped); propagate the length delta to the mbuf.
	 */
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	/* Restore the fields that were repurposed on the enqueue side:
	 * buf_iova carried the op pointer, digest.phys_addr the real IOVA.
	 */
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	/* Skip the SEC flow-context data head-room so data_off points at
	 * the start of the actual payload.
	 */
	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif
1597
/*
 * Convert an FD returned by SEC back into the originating crypto op.
 *
 * Simple-format FDs (security sessions) are delegated to
 * sec_simple_fd_to_mbuf().  For frame-list (FLE) format FDs, the op
 * pointer is stored in the FLE entry immediately preceding the one the FD
 * points at; after recovery the FLE memory is returned to the per-qp pool
 * (contiguous mbuf case) or freed (SG case, which was rte_malloc'ed).
 *
 * @param fd  completed frame descriptor from SEC
 * @param qp  queue pair owning the FLE pool
 * @return recovered crypto op (may be NULL if the simple-FD path fails)
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIB_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefeth op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIB_SECURITY
	/* For protocol offload, SEC changed the frame length; rewrite the
	 * packet length and push the delta into the last segment.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: (fle - 1) is the start of the pool element */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}
1662
1663 static void
1664 dpaa2_sec_dump(struct rte_crypto_op *op)
1665 {
1666         int i;
1667         dpaa2_sec_session *sess = NULL;
1668         struct ctxt_priv *priv;
1669         uint8_t bufsize;
1670         struct rte_crypto_sym_op *sym_op;
1671
1672         if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1673                 sess = (dpaa2_sec_session *)get_sym_session_private_data(
1674                         op->sym->session, cryptodev_driver_id);
1675 #ifdef RTE_LIBRTE_SECURITY
1676         else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1677                 sess = (dpaa2_sec_session *)get_sec_session_private_data(
1678                         op->sym->sec_session);
1679 #endif
1680
1681         if (sess == NULL)
1682                 goto mbuf_dump;
1683
1684         priv = (struct ctxt_priv *)sess->ctxt;
1685         printf("\n****************************************\n"
1686                 "session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
1687                 "\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
1688                 "\tCipher key len:\t%zd\n", sess->ctxt_type,
1689                 (sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
1690                 sess->cipher_alg, sess->auth_alg, sess->aead_alg,
1691                 sess->cipher_key.length);
1692                 rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
1693                                 sess->cipher_key.length);
1694                 rte_hexdump(stdout, "auth key", sess->auth_key.data,
1695                                 sess->auth_key.length);
1696         printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
1697                 "\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
1698                 " len:\t%d\n\taead cipher text:\t%d\n",
1699                 sess->auth_key.length, sess->iv.length, sess->iv.offset,
1700                 sess->digest_length, sess->status,
1701                 sess->ext_params.aead_ctxt.auth_only_len,
1702                 sess->ext_params.aead_ctxt.auth_cipher_text);
1703 #ifdef RTE_LIBRTE_SECURITY
1704         printf("PDCP session params:\n"
1705                 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
1706                 "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
1707                 "\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
1708                 sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
1709                 sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
1710                 sess->pdcp.hfn_threshold);
1711
1712 #endif
1713         bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
1714         printf("Descriptor Dump:\n");
1715         for (i = 0; i < bufsize; i++)
1716                 printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
1717
1718         printf("\n");
1719 mbuf_dump:
1720         sym_op = op->sym;
1721         if (sym_op->m_src) {
1722                 printf("Source mbuf:\n");
1723                 rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
1724         }
1725         if (sym_op->m_dst) {
1726                 printf("Destination mbuf:\n");
1727                 rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
1728         }
1729
1730         printf("Session address = %p\ncipher offset: %d, length: %d\n"
1731                 "auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
1732                 , sym_op->session,
1733                 sym_op->cipher.data.offset, sym_op->cipher.data.length,
1734                 sym_op->auth.data.offset, sym_op->auth.data.length,
1735                 sym_op->aead.data.offset, sym_op->aead.data.length);
1736         printf("\n");
1737
1738 }
1739
1740 static void
1741 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
1742                           struct dpaa2_queue *dpaa2_q)
1743 {
1744         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1745         struct rte_crypto_op *op;
1746         struct qbman_fd *fd;
1747         struct dpaa2_sec_qp *dpaa2_qp;
1748
1749         dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
1750         fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1751         op = sec_fd_to_mbuf(fd, dpaa2_qp);
1752         /* Instead of freeing, enqueue it to the sec tx queue (sec->core)
1753          * after setting an error in FD. But this will have performance impact.
1754          */
1755         rte_pktmbuf_free(op->sym->m_src);
1756 }
1757
/*
 * Prepare the enqueue descriptor for one mbuf on an ordered tx queue.
 *
 * If the mbuf carries an ORP (order restoration point) sequence number,
 * the descriptor is set up for order restoration; in strict (non-loose)
 * ordering mode an enqueue-response slot is also reserved so completions
 * can be tracked.  Otherwise the sequence number indexes a held DQRR
 * entry, which is consumed via DCA.  The mbuf's sequence number is
 * invalidated afterwards in all cases.
 */
static void
dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		/* Extract ORP id and sequence number packed into the seqn. */
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;


		if (!priv->en_loose_ordered) {
			/* Strict ordering: request an enqueue response and
			 * remember which queue/pool it belongs to so the
			 * response handler can free resources later.
			 */
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			/* Advance producer index with wrap-around. */
			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		/* seqn holds a 1-based DQRR index: consume the held DQRR
		 * entry via discrete consumption acknowledgement (DCA).
		 */
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
1800
1801
/*
 * Ordered-mode burst enqueue: build one FD per crypto op and push them to
 * the SEC tx queue with per-frame enqueue descriptors (order restoration
 * or DQRR consumption as required).
 *
 * On a persistent enqueue failure the FLE buffers of the not-yet-sent FDs
 * are returned to the pool and the remaining ops are counted as errors.
 *
 * @return number of ops actually enqueued (<= nb_ops)
 */
static uint16_t
dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, num_free_eq_desc, retry_count;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	struct dpaa2_sec_dev_private *priv =
				dpaa2_qp->tx_vq.crypto_data->dev_private;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}

	/* Lazily affine a QBMAN software portal to this lcore. */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		/* Cap each inner burst at the EQCR ring size. */
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		/* Strict ordering also needs free enqueue-response slots. */
		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn((*ops)->sym->m_src)) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);

			if (*dpaa2_seqn((*ops)->sym->m_src))
				dpaa2_sec_set_enqueue_descriptor(
						&dpaa2_qp->tx_vq,
						(*ops)->sym->m_src,
						&eqdesc[loop]);
			else
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);

			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
			if (ret) {
				DPAA2_SEC_DP_DEBUG("FD build failed\n");
				goto skip_tx;
			}
			ops++;
		}

		/* Enqueue the built FDs, retrying while the portal is busy. */
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
					/* freeing the fle buffers */
					while (loop < frames_to_send) {
						free_fle(&fd_arr[loop],
								dpaa2_qp);
						loop++;
					}
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}

skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
1910
/*
 * Burst dequeue: issue a volatile dequeue (pull) command on the qp's rx
 * frame queue and convert every returned FD back into a crypto op.
 *
 * Ops whose FD carries a non-zero frame-result code (FRC) are marked
 * RTE_CRYPTO_OP_STATUS_ERROR and optionally dumped, depending on the
 * dpaa2_sec_dp_dump level.
 *
 * @return number of ops placed in @ops
 */
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	/* Lazily affine a QBMAN software portal to this lcore. */
	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	/* Build the pull descriptor: up to DQRR-size frames into dq_storage. */
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);

		/* Non-zero FRC means SEC reported an error for this frame. */
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
						 fd->simple.frc);
				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
					dpaa2_sec_dump(ops[num_rx]);
			}

			dpaa2_qp->rx_vq.err_pkts += 1;
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
				dpaa2_qp->rx_vq.err_pkts);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
2016
2017 /** Release queue pair */
2018 static int
2019 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
2020 {
2021         struct dpaa2_sec_qp *qp =
2022                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
2023
2024         PMD_INIT_FUNC_TRACE();
2025
2026         if (qp->rx_vq.q_storage) {
2027                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
2028                 rte_free(qp->rx_vq.q_storage);
2029         }
2030         rte_mempool_free(qp->fle_pool);
2031         rte_free(qp);
2032
2033         dev->data->queue_pairs[queue_pair_id] = NULL;
2034
2035         return 0;
2036 }
2037
2038 /** Setup a queue pair */
2039 static int
2040 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2041                 const struct rte_cryptodev_qp_conf *qp_conf,
2042                 __rte_unused int socket_id)
2043 {
2044         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2045         struct dpaa2_sec_qp *qp;
2046         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2047         struct dpseci_rx_queue_cfg cfg;
2048         int32_t retcode;
2049         char str[30];
2050
2051         PMD_INIT_FUNC_TRACE();
2052
2053         /* If qp is already in use free ring memory and qp metadata. */
2054         if (dev->data->queue_pairs[qp_id] != NULL) {
2055                 DPAA2_SEC_INFO("QP already setup");
2056                 return 0;
2057         }
2058
2059         DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
2060                     dev, qp_id, qp_conf);
2061
2062         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2063
2064         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2065                         RTE_CACHE_LINE_SIZE);
2066         if (!qp) {
2067                 DPAA2_SEC_ERR("malloc failed for rx/tx queues");
2068                 return -ENOMEM;
2069         }
2070
2071         qp->rx_vq.crypto_data = dev->data;
2072         qp->tx_vq.crypto_data = dev->data;
2073         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
2074                 sizeof(struct queue_storage_info_t),
2075                 RTE_CACHE_LINE_SIZE);
2076         if (!qp->rx_vq.q_storage) {
2077                 DPAA2_SEC_ERR("malloc failed for q_storage");
2078                 return -ENOMEM;
2079         }
2080         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
2081
2082         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
2083                 DPAA2_SEC_ERR("Unable to allocate dequeue storage");
2084                 return -ENOMEM;
2085         }
2086
2087         dev->data->queue_pairs[qp_id] = qp;
2088
2089         snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
2090                         getpid(), dev->data->dev_id, qp_id);
2091         qp->fle_pool = rte_mempool_create((const char *)str,
2092                         qp_conf->nb_descriptors,
2093                         FLE_POOL_BUF_SIZE,
2094                         FLE_POOL_CACHE_SIZE, 0,
2095                         NULL, NULL, NULL, NULL,
2096                         SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
2097         if (!qp->fle_pool) {
2098                 DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2099                 return -ENOMEM;
2100         }
2101
2102         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
2103         cfg.user_ctx = (size_t)(&qp->rx_vq);
2104         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2105                                       qp_id, &cfg);
2106         return retcode;
2107 }
2108
/** Returns the size of the DPAA2 SEC session structure
 * (the old comment was a copy-paste leftover from the aesni_gcm PMD).
 */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
2117
2118 static int
2119 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
2120                       dpaa2_sec_session *session)
2121 {
2122         struct alginfo cipherdata;
2123         int bufsize, ret = 0;
2124         struct ctxt_priv *priv;
2125         struct sec_flow_context *flc;
2126
2127         PMD_INIT_FUNC_TRACE();
2128
2129         /* For SEC CIPHER only one descriptor is required. */
2130         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2131                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2132                         RTE_CACHE_LINE_SIZE);
2133         if (priv == NULL) {
2134                 DPAA2_SEC_ERR("No Memory for priv CTXT");
2135                 return -ENOMEM;
2136         }
2137
2138         flc = &priv->flc_desc[0].flc;
2139
2140         session->ctxt_type = DPAA2_SEC_CIPHER;
2141         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2142                         RTE_CACHE_LINE_SIZE);
2143         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2144                 DPAA2_SEC_ERR("No Memory for cipher key");
2145                 rte_free(priv);
2146                 return -ENOMEM;
2147         }
2148         session->cipher_key.length = xform->cipher.key.length;
2149
2150         memcpy(session->cipher_key.data, xform->cipher.key.data,
2151                xform->cipher.key.length);
2152         cipherdata.key = (size_t)session->cipher_key.data;
2153         cipherdata.keylen = session->cipher_key.length;
2154         cipherdata.key_enc_flags = 0;
2155         cipherdata.key_type = RTA_DATA_IMM;
2156
2157         /* Set IV parameters */
2158         session->iv.offset = xform->cipher.iv.offset;
2159         session->iv.length = xform->cipher.iv.length;
2160         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2161                                 DIR_ENC : DIR_DEC;
2162
2163         switch (xform->cipher.algo) {
2164         case RTE_CRYPTO_CIPHER_AES_CBC:
2165                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2166                 cipherdata.algmode = OP_ALG_AAI_CBC;
2167                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2168                 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2169                                                 SHR_NEVER, &cipherdata,
2170                                                 session->iv.length,
2171                                                 session->dir);
2172                 break;
2173         case RTE_CRYPTO_CIPHER_3DES_CBC:
2174                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2175                 cipherdata.algmode = OP_ALG_AAI_CBC;
2176                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2177                 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2178                                                 SHR_NEVER, &cipherdata,
2179                                                 session->iv.length,
2180                                                 session->dir);
2181                 break;
2182         case RTE_CRYPTO_CIPHER_DES_CBC:
2183                 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2184                 cipherdata.algmode = OP_ALG_AAI_CBC;
2185                 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2186                 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2187                                                 SHR_NEVER, &cipherdata,
2188                                                 session->iv.length,
2189                                                 session->dir);
2190                 break;
2191         case RTE_CRYPTO_CIPHER_AES_CTR:
2192                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2193                 cipherdata.algmode = OP_ALG_AAI_CTR;
2194                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2195                 bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2196                                                 SHR_NEVER, &cipherdata,
2197                                                 session->iv.length,
2198                                                 session->dir);
2199                 break;
2200         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2201                 cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2202                 session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2203                 bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2204                                               &cipherdata,
2205                                               session->dir);
2206                 break;
2207         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2208                 cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2209                 session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2210                 bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2211                                               &cipherdata,
2212                                               session->dir);
2213                 break;
2214         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2215         case RTE_CRYPTO_CIPHER_AES_F8:
2216         case RTE_CRYPTO_CIPHER_AES_ECB:
2217         case RTE_CRYPTO_CIPHER_3DES_ECB:
2218         case RTE_CRYPTO_CIPHER_3DES_CTR:
2219         case RTE_CRYPTO_CIPHER_AES_XTS:
2220         case RTE_CRYPTO_CIPHER_ARC4:
2221         case RTE_CRYPTO_CIPHER_NULL:
2222                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2223                         xform->cipher.algo);
2224                 ret = -ENOTSUP;
2225                 goto error_out;
2226         default:
2227                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2228                         xform->cipher.algo);
2229                 ret = -ENOTSUP;
2230                 goto error_out;
2231         }
2232
2233         if (bufsize < 0) {
2234                 DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2235                 ret = -EINVAL;
2236                 goto error_out;
2237         }
2238
2239         flc->word1_sdl = (uint8_t)bufsize;
2240         session->ctxt = priv;
2241
2242 #ifdef CAAM_DESC_DEBUG
2243         int i;
2244         for (i = 0; i < bufsize; i++)
2245                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2246 #endif
2247         return ret;
2248
2249 error_out:
2250         rte_free(session->cipher_key.data);
2251         rte_free(priv);
2252         return ret;
2253 }
2254
2255 static int
2256 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
2257                     dpaa2_sec_session *session)
2258 {
2259         struct alginfo authdata;
2260         int bufsize, ret = 0;
2261         struct ctxt_priv *priv;
2262         struct sec_flow_context *flc;
2263
2264         PMD_INIT_FUNC_TRACE();
2265
2266         /* For SEC AUTH three descriptors are required for various stages */
2267         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2268                         sizeof(struct ctxt_priv) + 3 *
2269                         sizeof(struct sec_flc_desc),
2270                         RTE_CACHE_LINE_SIZE);
2271         if (priv == NULL) {
2272                 DPAA2_SEC_ERR("No Memory for priv CTXT");
2273                 return -ENOMEM;
2274         }
2275
2276         flc = &priv->flc_desc[DESC_INITFINAL].flc;
2277
2278         session->ctxt_type = DPAA2_SEC_AUTH;
2279         session->auth_key.length = xform->auth.key.length;
2280         if (xform->auth.key.length) {
2281                 session->auth_key.data = rte_zmalloc(NULL,
2282                         xform->auth.key.length,
2283                         RTE_CACHE_LINE_SIZE);
2284                 if (session->auth_key.data == NULL) {
2285                         DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2286                         rte_free(priv);
2287                         return -ENOMEM;
2288                 }
2289                 memcpy(session->auth_key.data, xform->auth.key.data,
2290                        xform->auth.key.length);
2291                 authdata.key = (size_t)session->auth_key.data;
2292                 authdata.key_enc_flags = 0;
2293                 authdata.key_type = RTA_DATA_IMM;
2294         }
2295         authdata.keylen = session->auth_key.length;
2296
2297         session->digest_length = xform->auth.digest_length;
2298         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2299                                 DIR_ENC : DIR_DEC;
2300
2301         switch (xform->auth.algo) {
2302         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2303                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2304                 authdata.algmode = OP_ALG_AAI_HMAC;
2305                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2306                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2307                                            1, 0, SHR_NEVER, &authdata,
2308                                            !session->dir,
2309                                            session->digest_length);
2310                 break;
2311         case RTE_CRYPTO_AUTH_MD5_HMAC:
2312                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2313                 authdata.algmode = OP_ALG_AAI_HMAC;
2314                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2315                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2316                                            1, 0, SHR_NEVER, &authdata,
2317                                            !session->dir,
2318                                            session->digest_length);
2319                 break;
2320         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2321                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2322                 authdata.algmode = OP_ALG_AAI_HMAC;
2323                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2324                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2325                                            1, 0, SHR_NEVER, &authdata,
2326                                            !session->dir,
2327                                            session->digest_length);
2328                 break;
2329         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2330                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2331                 authdata.algmode = OP_ALG_AAI_HMAC;
2332                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2333                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2334                                            1, 0, SHR_NEVER, &authdata,
2335                                            !session->dir,
2336                                            session->digest_length);
2337                 break;
2338         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2339                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2340                 authdata.algmode = OP_ALG_AAI_HMAC;
2341                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2342                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2343                                            1, 0, SHR_NEVER, &authdata,
2344                                            !session->dir,
2345                                            session->digest_length);
2346                 break;
2347         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2348                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2349                 authdata.algmode = OP_ALG_AAI_HMAC;
2350                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2351                 bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2352                                            1, 0, SHR_NEVER, &authdata,
2353                                            !session->dir,
2354                                            session->digest_length);
2355                 break;
2356         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2357                 authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2358                 authdata.algmode = OP_ALG_AAI_F9;
2359                 session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2360                 session->iv.offset = xform->auth.iv.offset;
2361                 session->iv.length = xform->auth.iv.length;
2362                 bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2363                                               1, 0, &authdata,
2364                                               !session->dir,
2365                                               session->digest_length);
2366                 break;
2367         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2368                 authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2369                 authdata.algmode = OP_ALG_AAI_F9;
2370                 session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2371                 session->iv.offset = xform->auth.iv.offset;
2372                 session->iv.length = xform->auth.iv.length;
2373                 bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2374                                            1, 0, &authdata,
2375                                            !session->dir,
2376                                            session->digest_length);
2377                 break;
2378         case RTE_CRYPTO_AUTH_SHA1:
2379                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2380                 authdata.algmode = OP_ALG_AAI_HASH;
2381                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2382                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2383                                            1, 0, SHR_NEVER, &authdata,
2384                                            !session->dir,
2385                                            session->digest_length);
2386                 break;
2387         case RTE_CRYPTO_AUTH_MD5:
2388                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2389                 authdata.algmode = OP_ALG_AAI_HASH;
2390                 session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2391                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2392                                            1, 0, SHR_NEVER, &authdata,
2393                                            !session->dir,
2394                                            session->digest_length);
2395                 break;
2396         case RTE_CRYPTO_AUTH_SHA256:
2397                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2398                 authdata.algmode = OP_ALG_AAI_HASH;
2399                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2400                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2401                                            1, 0, SHR_NEVER, &authdata,
2402                                            !session->dir,
2403                                            session->digest_length);
2404                 break;
2405         case RTE_CRYPTO_AUTH_SHA384:
2406                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2407                 authdata.algmode = OP_ALG_AAI_HASH;
2408                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2409                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2410                                            1, 0, SHR_NEVER, &authdata,
2411                                            !session->dir,
2412                                            session->digest_length);
2413                 break;
2414         case RTE_CRYPTO_AUTH_SHA512:
2415                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2416                 authdata.algmode = OP_ALG_AAI_HASH;
2417                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2418                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2419                                            1, 0, SHR_NEVER, &authdata,
2420                                            !session->dir,
2421                                            session->digest_length);
2422                 break;
2423         case RTE_CRYPTO_AUTH_SHA224:
2424                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2425                 authdata.algmode = OP_ALG_AAI_HASH;
2426                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2427                 bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2428                                            1, 0, SHR_NEVER, &authdata,
2429                                            !session->dir,
2430                                            session->digest_length);
2431                 break;
2432         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2433                 authdata.algtype = OP_ALG_ALGSEL_AES;
2434                 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2435                 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2436                 bufsize = cnstr_shdsc_aes_mac(
2437                                         priv->flc_desc[DESC_INITFINAL].desc,
2438                                         1, 0, SHR_NEVER, &authdata,
2439                                         !session->dir,
2440                                         session->digest_length);
2441                 break;
2442         case RTE_CRYPTO_AUTH_AES_CMAC:
2443                 authdata.algtype = OP_ALG_ALGSEL_AES;
2444                 authdata.algmode = OP_ALG_AAI_CMAC;
2445                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2446                 bufsize = cnstr_shdsc_aes_mac(
2447                                            priv->flc_desc[DESC_INITFINAL].desc,
2448                                            1, 0, SHR_NEVER, &authdata,
2449                                            !session->dir,
2450                                            session->digest_length);
2451                 break;
2452         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2453         case RTE_CRYPTO_AUTH_AES_GMAC:
2454         case RTE_CRYPTO_AUTH_KASUMI_F9:
2455         case RTE_CRYPTO_AUTH_NULL:
2456                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
2457                               xform->auth.algo);
2458                 ret = -ENOTSUP;
2459                 goto error_out;
2460         default:
2461                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2462                               xform->auth.algo);
2463                 ret = -ENOTSUP;
2464                 goto error_out;
2465         }
2466
2467         if (bufsize < 0) {
2468                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2469                 ret = -EINVAL;
2470                 goto error_out;
2471         }
2472
2473         flc->word1_sdl = (uint8_t)bufsize;
2474         session->ctxt = priv;
2475 #ifdef CAAM_DESC_DEBUG
2476         int i;
2477         for (i = 0; i < bufsize; i++)
2478                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2479                                 i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2480 #endif
2481
2482         return ret;
2483
2484 error_out:
2485         rte_free(session->auth_key.data);
2486         rte_free(priv);
2487         return ret;
2488 }
2489
2490 static int
2491 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
2492                     dpaa2_sec_session *session)
2493 {
2494         struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2495         struct alginfo aeaddata;
2496         int bufsize;
2497         struct ctxt_priv *priv;
2498         struct sec_flow_context *flc;
2499         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2500         int err, ret = 0;
2501
2502         PMD_INIT_FUNC_TRACE();
2503
2504         /* Set IV parameters */
2505         session->iv.offset = aead_xform->iv.offset;
2506         session->iv.length = aead_xform->iv.length;
2507         session->ctxt_type = DPAA2_SEC_AEAD;
2508
2509         /* For SEC AEAD only one descriptor is required */
2510         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2511                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2512                         RTE_CACHE_LINE_SIZE);
2513         if (priv == NULL) {
2514                 DPAA2_SEC_ERR("No Memory for priv CTXT");
2515                 return -ENOMEM;
2516         }
2517
2518         flc = &priv->flc_desc[0].flc;
2519
2520         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2521                                                RTE_CACHE_LINE_SIZE);
2522         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2523                 DPAA2_SEC_ERR("No Memory for aead key");
2524                 rte_free(priv);
2525                 return -ENOMEM;
2526         }
2527         memcpy(session->aead_key.data, aead_xform->key.data,
2528                aead_xform->key.length);
2529
2530         session->digest_length = aead_xform->digest_length;
2531         session->aead_key.length = aead_xform->key.length;
2532         ctxt->auth_only_len = aead_xform->aad_length;
2533
2534         aeaddata.key = (size_t)session->aead_key.data;
2535         aeaddata.keylen = session->aead_key.length;
2536         aeaddata.key_enc_flags = 0;
2537         aeaddata.key_type = RTA_DATA_IMM;
2538
2539         switch (aead_xform->algo) {
2540         case RTE_CRYPTO_AEAD_AES_GCM:
2541                 aeaddata.algtype = OP_ALG_ALGSEL_AES;
2542                 aeaddata.algmode = OP_ALG_AAI_GCM;
2543                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2544                 break;
2545         case RTE_CRYPTO_AEAD_AES_CCM:
2546                 DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2547                               aead_xform->algo);
2548                 ret = -ENOTSUP;
2549                 goto error_out;
2550         default:
2551                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2552                               aead_xform->algo);
2553                 ret = -ENOTSUP;
2554                 goto error_out;
2555         }
2556         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2557                                 DIR_ENC : DIR_DEC;
2558
2559         priv->flc_desc[0].desc[0] = aeaddata.keylen;
2560         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2561                                DESC_JOB_IO_LEN,
2562                                (unsigned int *)priv->flc_desc[0].desc,
2563                                &priv->flc_desc[0].desc[1], 1);
2564
2565         if (err < 0) {
2566                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2567                 ret = -EINVAL;
2568                 goto error_out;
2569         }
2570         if (priv->flc_desc[0].desc[1] & 1) {
2571                 aeaddata.key_type = RTA_DATA_IMM;
2572         } else {
2573                 aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2574                 aeaddata.key_type = RTA_DATA_PTR;
2575         }
2576         priv->flc_desc[0].desc[0] = 0;
2577         priv->flc_desc[0].desc[1] = 0;
2578
2579         if (session->dir == DIR_ENC)
2580                 bufsize = cnstr_shdsc_gcm_encap(
2581                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2582                                 &aeaddata, session->iv.length,
2583                                 session->digest_length);
2584         else
2585                 bufsize = cnstr_shdsc_gcm_decap(
2586                                 priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2587                                 &aeaddata, session->iv.length,
2588                                 session->digest_length);
2589         if (bufsize < 0) {
2590                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2591                 ret = -EINVAL;
2592                 goto error_out;
2593         }
2594
2595         flc->word1_sdl = (uint8_t)bufsize;
2596         session->ctxt = priv;
2597 #ifdef CAAM_DESC_DEBUG
2598         int i;
2599         for (i = 0; i < bufsize; i++)
2600                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2601                             i, priv->flc_desc[0].desc[i]);
2602 #endif
2603         return ret;
2604
2605 error_out:
2606         rte_free(session->aead_key.data);
2607         rte_free(priv);
2608         return ret;
2609 }
2610
2611
2612 static int
2613 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
2614                     dpaa2_sec_session *session)
2615 {
2616         struct alginfo authdata, cipherdata;
2617         int bufsize;
2618         struct ctxt_priv *priv;
2619         struct sec_flow_context *flc;
2620         struct rte_crypto_cipher_xform *cipher_xform;
2621         struct rte_crypto_auth_xform *auth_xform;
2622         int err, ret = 0;
2623
2624         PMD_INIT_FUNC_TRACE();
2625
2626         if (session->ext_params.aead_ctxt.auth_cipher_text) {
2627                 cipher_xform = &xform->cipher;
2628                 auth_xform = &xform->next->auth;
2629                 session->ctxt_type =
2630                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2631                         DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2632         } else {
2633                 cipher_xform = &xform->next->cipher;
2634                 auth_xform = &xform->auth;
2635                 session->ctxt_type =
2636                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2637                         DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2638         }
2639
2640         /* Set IV parameters */
2641         session->iv.offset = cipher_xform->iv.offset;
2642         session->iv.length = cipher_xform->iv.length;
2643
2644         /* For SEC AEAD only one descriptor is required */
2645         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2646                         sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2647                         RTE_CACHE_LINE_SIZE);
2648         if (priv == NULL) {
2649                 DPAA2_SEC_ERR("No Memory for priv CTXT");
2650                 return -ENOMEM;
2651         }
2652
2653         flc = &priv->flc_desc[0].flc;
2654
2655         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2656                                                RTE_CACHE_LINE_SIZE);
2657         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2658                 DPAA2_SEC_ERR("No Memory for cipher key");
2659                 rte_free(priv);
2660                 return -ENOMEM;
2661         }
2662         session->cipher_key.length = cipher_xform->key.length;
2663         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2664                                              RTE_CACHE_LINE_SIZE);
2665         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2666                 DPAA2_SEC_ERR("No Memory for auth key");
2667                 rte_free(session->cipher_key.data);
2668                 rte_free(priv);
2669                 return -ENOMEM;
2670         }
2671         session->auth_key.length = auth_xform->key.length;
2672         memcpy(session->cipher_key.data, cipher_xform->key.data,
2673                cipher_xform->key.length);
2674         memcpy(session->auth_key.data, auth_xform->key.data,
2675                auth_xform->key.length);
2676
2677         authdata.key = (size_t)session->auth_key.data;
2678         authdata.keylen = session->auth_key.length;
2679         authdata.key_enc_flags = 0;
2680         authdata.key_type = RTA_DATA_IMM;
2681
2682         session->digest_length = auth_xform->digest_length;
2683
2684         switch (auth_xform->algo) {
2685         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2686                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
2687                 authdata.algmode = OP_ALG_AAI_HMAC;
2688                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2689                 break;
2690         case RTE_CRYPTO_AUTH_MD5_HMAC:
2691                 authdata.algtype = OP_ALG_ALGSEL_MD5;
2692                 authdata.algmode = OP_ALG_AAI_HMAC;
2693                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2694                 break;
2695         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2696                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
2697                 authdata.algmode = OP_ALG_AAI_HMAC;
2698                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2699                 break;
2700         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2701                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
2702                 authdata.algmode = OP_ALG_AAI_HMAC;
2703                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2704                 break;
2705         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2706                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
2707                 authdata.algmode = OP_ALG_AAI_HMAC;
2708                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2709                 break;
2710         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2711                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
2712                 authdata.algmode = OP_ALG_AAI_HMAC;
2713                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2714                 break;
2715         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2716                 authdata.algtype = OP_ALG_ALGSEL_AES;
2717                 authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2718                 session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2719                 break;
2720         case RTE_CRYPTO_AUTH_AES_CMAC:
2721                 authdata.algtype = OP_ALG_ALGSEL_AES;
2722                 authdata.algmode = OP_ALG_AAI_CMAC;
2723                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2724                 break;
2725         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2726         case RTE_CRYPTO_AUTH_AES_GMAC:
2727         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2728         case RTE_CRYPTO_AUTH_NULL:
2729         case RTE_CRYPTO_AUTH_SHA1:
2730         case RTE_CRYPTO_AUTH_SHA256:
2731         case RTE_CRYPTO_AUTH_SHA512:
2732         case RTE_CRYPTO_AUTH_SHA224:
2733         case RTE_CRYPTO_AUTH_SHA384:
2734         case RTE_CRYPTO_AUTH_MD5:
2735         case RTE_CRYPTO_AUTH_KASUMI_F9:
2736         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2737                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2738                               auth_xform->algo);
2739                 ret = -ENOTSUP;
2740                 goto error_out;
2741         default:
2742                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2743                               auth_xform->algo);
2744                 ret = -ENOTSUP;
2745                 goto error_out;
2746         }
2747         cipherdata.key = (size_t)session->cipher_key.data;
2748         cipherdata.keylen = session->cipher_key.length;
2749         cipherdata.key_enc_flags = 0;
2750         cipherdata.key_type = RTA_DATA_IMM;
2751
2752         switch (cipher_xform->algo) {
2753         case RTE_CRYPTO_CIPHER_AES_CBC:
2754                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2755                 cipherdata.algmode = OP_ALG_AAI_CBC;
2756                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2757                 break;
2758         case RTE_CRYPTO_CIPHER_3DES_CBC:
2759                 cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2760                 cipherdata.algmode = OP_ALG_AAI_CBC;
2761                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2762                 break;
2763         case RTE_CRYPTO_CIPHER_DES_CBC:
2764                 cipherdata.algtype = OP_ALG_ALGSEL_DES;
2765                 cipherdata.algmode = OP_ALG_AAI_CBC;
2766                 session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2767                 break;
2768         case RTE_CRYPTO_CIPHER_AES_CTR:
2769                 cipherdata.algtype = OP_ALG_ALGSEL_AES;
2770                 cipherdata.algmode = OP_ALG_AAI_CTR;
2771                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2772                 break;
2773         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2774         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2775         case RTE_CRYPTO_CIPHER_NULL:
2776         case RTE_CRYPTO_CIPHER_3DES_ECB:
2777         case RTE_CRYPTO_CIPHER_3DES_CTR:
2778         case RTE_CRYPTO_CIPHER_AES_ECB:
2779         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2780                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2781                               cipher_xform->algo);
2782                 ret = -ENOTSUP;
2783                 goto error_out;
2784         default:
2785                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2786                               cipher_xform->algo);
2787                 ret = -ENOTSUP;
2788                 goto error_out;
2789         }
2790         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2791                                 DIR_ENC : DIR_DEC;
2792
2793         priv->flc_desc[0].desc[0] = cipherdata.keylen;
2794         priv->flc_desc[0].desc[1] = authdata.keylen;
2795         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2796                                DESC_JOB_IO_LEN,
2797                                (unsigned int *)priv->flc_desc[0].desc,
2798                                &priv->flc_desc[0].desc[2], 2);
2799
2800         if (err < 0) {
2801                 DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2802                 ret = -EINVAL;
2803                 goto error_out;
2804         }
2805         if (priv->flc_desc[0].desc[2] & 1) {
2806                 cipherdata.key_type = RTA_DATA_IMM;
2807         } else {
2808                 cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2809                 cipherdata.key_type = RTA_DATA_PTR;
2810         }
2811         if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2812                 authdata.key_type = RTA_DATA_IMM;
2813         } else {
2814                 authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2815                 authdata.key_type = RTA_DATA_PTR;
2816         }
2817         priv->flc_desc[0].desc[0] = 0;
2818         priv->flc_desc[0].desc[1] = 0;
2819         priv->flc_desc[0].desc[2] = 0;
2820
2821         if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2822                 bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2823                                               0, SHR_SERIAL,
2824                                               &cipherdata, &authdata,
2825                                               session->iv.length,
2826                                               session->digest_length,
2827                                               session->dir);
2828                 if (bufsize < 0) {
2829                         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2830                         ret = -EINVAL;
2831                         goto error_out;
2832                 }
2833         } else {
2834                 DPAA2_SEC_ERR("Hash before cipher not supported");
2835                 ret = -ENOTSUP;
2836                 goto error_out;
2837         }
2838
2839         flc->word1_sdl = (uint8_t)bufsize;
2840         session->ctxt = priv;
2841 #ifdef CAAM_DESC_DEBUG
2842         int i;
2843         for (i = 0; i < bufsize; i++)
2844                 DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2845                             i, priv->flc_desc[0].desc[i]);
2846 #endif
2847
2848         return ret;
2849
2850 error_out:
2851         rte_free(session->cipher_key.data);
2852         rte_free(session->auth_key.data);
2853         rte_free(priv);
2854         return ret;
2855 }
2856
2857 static int
2858 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
2859 {
2860         dpaa2_sec_session *session = sess;
2861         int ret;
2862
2863         PMD_INIT_FUNC_TRACE();
2864
2865         if (unlikely(sess == NULL)) {
2866                 DPAA2_SEC_ERR("Invalid session struct");
2867                 return -EINVAL;
2868         }
2869
2870         memset(session, 0, sizeof(dpaa2_sec_session));
2871         /* Default IV length = 0 */
2872         session->iv.length = 0;
2873
2874         /* Cipher Only */
2875         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2876                 ret = dpaa2_sec_cipher_init(xform, session);
2877
2878         /* Authentication Only */
2879         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2880                    xform->next == NULL) {
2881                 ret = dpaa2_sec_auth_init(xform, session);
2882
2883         /* Cipher then Authenticate */
2884         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2885                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2886                 session->ext_params.aead_ctxt.auth_cipher_text = true;
2887                 if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2888                         ret = dpaa2_sec_auth_init(xform, session);
2889                 else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2890                         ret = dpaa2_sec_cipher_init(xform, session);
2891                 else
2892                         ret = dpaa2_sec_aead_chain_init(xform, session);
2893         /* Authenticate then Cipher */
2894         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2895                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2896                 session->ext_params.aead_ctxt.auth_cipher_text = false;
2897                 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2898                         ret = dpaa2_sec_cipher_init(xform, session);
2899                 else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2900                         ret = dpaa2_sec_auth_init(xform, session);
2901                 else
2902                         ret = dpaa2_sec_aead_chain_init(xform, session);
2903         /* AEAD operation for AES-GCM kind of Algorithms */
2904         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2905                    xform->next == NULL) {
2906                 ret = dpaa2_sec_aead_init(xform, session);
2907
2908         } else {
2909                 DPAA2_SEC_ERR("Invalid crypto type");
2910                 return -EINVAL;
2911         }
2912
2913         return ret;
2914 }
2915
2916 #ifdef RTE_LIB_SECURITY
2917 static int
2918 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2919                         dpaa2_sec_session *session,
2920                         struct alginfo *aeaddata)
2921 {
2922         PMD_INIT_FUNC_TRACE();
2923
2924         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2925                                                RTE_CACHE_LINE_SIZE);
2926         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2927                 DPAA2_SEC_ERR("No Memory for aead key");
2928                 return -ENOMEM;
2929         }
2930         memcpy(session->aead_key.data, aead_xform->key.data,
2931                aead_xform->key.length);
2932
2933         session->digest_length = aead_xform->digest_length;
2934         session->aead_key.length = aead_xform->key.length;
2935
2936         aeaddata->key = (size_t)session->aead_key.data;
2937         aeaddata->keylen = session->aead_key.length;
2938         aeaddata->key_enc_flags = 0;
2939         aeaddata->key_type = RTA_DATA_IMM;
2940
2941         switch (aead_xform->algo) {
2942         case RTE_CRYPTO_AEAD_AES_GCM:
2943                 switch (session->digest_length) {
2944                 case 8:
2945                         aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2946                         break;
2947                 case 12:
2948                         aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2949                         break;
2950                 case 16:
2951                         aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2952                         break;
2953                 default:
2954                         DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2955                                       session->digest_length);
2956                         return -EINVAL;
2957                 }
2958                 aeaddata->algmode = OP_ALG_AAI_GCM;
2959                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2960                 break;
2961         case RTE_CRYPTO_AEAD_AES_CCM:
2962                 switch (session->digest_length) {
2963                 case 8:
2964                         aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2965                         break;
2966                 case 12:
2967                         aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2968                         break;
2969                 case 16:
2970                         aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2971                         break;
2972                 default:
2973                         DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2974                                       session->digest_length);
2975                         return -EINVAL;
2976                 }
2977                 aeaddata->algmode = OP_ALG_AAI_CCM;
2978                 session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2979                 break;
2980         default:
2981                 DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2982                               aead_xform->algo);
2983                 return -ENOTSUP;
2984         }
2985         session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2986                                 DIR_ENC : DIR_DEC;
2987
2988         return 0;
2989 }
2990
2991 static int
2992 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2993         struct rte_crypto_auth_xform *auth_xform,
2994         dpaa2_sec_session *session,
2995         struct alginfo *cipherdata,
2996         struct alginfo *authdata)
2997 {
2998         if (cipher_xform) {
2999                 session->cipher_key.data = rte_zmalloc(NULL,
3000                                                        cipher_xform->key.length,
3001                                                        RTE_CACHE_LINE_SIZE);
3002                 if (session->cipher_key.data == NULL &&
3003                                 cipher_xform->key.length > 0) {
3004                         DPAA2_SEC_ERR("No Memory for cipher key");
3005                         return -ENOMEM;
3006                 }
3007
3008                 session->cipher_key.length = cipher_xform->key.length;
3009                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3010                                 cipher_xform->key.length);
3011                 session->cipher_alg = cipher_xform->algo;
3012         } else {
3013                 session->cipher_key.data = NULL;
3014                 session->cipher_key.length = 0;
3015                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3016         }
3017
3018         if (auth_xform) {
3019                 session->auth_key.data = rte_zmalloc(NULL,
3020                                                 auth_xform->key.length,
3021                                                 RTE_CACHE_LINE_SIZE);
3022                 if (session->auth_key.data == NULL &&
3023                                 auth_xform->key.length > 0) {
3024                         DPAA2_SEC_ERR("No Memory for auth key");
3025                         return -ENOMEM;
3026                 }
3027                 session->auth_key.length = auth_xform->key.length;
3028                 memcpy(session->auth_key.data, auth_xform->key.data,
3029                                 auth_xform->key.length);
3030                 session->auth_alg = auth_xform->algo;
3031                 session->digest_length = auth_xform->digest_length;
3032         } else {
3033                 session->auth_key.data = NULL;
3034                 session->auth_key.length = 0;
3035                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
3036         }
3037
3038         authdata->key = (size_t)session->auth_key.data;
3039         authdata->keylen = session->auth_key.length;
3040         authdata->key_enc_flags = 0;
3041         authdata->key_type = RTA_DATA_IMM;
3042         switch (session->auth_alg) {
3043         case RTE_CRYPTO_AUTH_SHA1_HMAC:
3044                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
3045                 authdata->algmode = OP_ALG_AAI_HMAC;
3046                 break;
3047         case RTE_CRYPTO_AUTH_MD5_HMAC:
3048                 authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
3049                 authdata->algmode = OP_ALG_AAI_HMAC;
3050                 break;
3051         case RTE_CRYPTO_AUTH_SHA256_HMAC:
3052                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
3053                 authdata->algmode = OP_ALG_AAI_HMAC;
3054                 if (session->digest_length != 16)
3055                         DPAA2_SEC_WARN(
3056                         "+++Using sha256-hmac truncated len is non-standard,"
3057                         "it will not work with lookaside proto");
3058                 break;
3059         case RTE_CRYPTO_AUTH_SHA384_HMAC:
3060                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
3061                 authdata->algmode = OP_ALG_AAI_HMAC;
3062                 break;
3063         case RTE_CRYPTO_AUTH_SHA512_HMAC:
3064                 authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3065                 authdata->algmode = OP_ALG_AAI_HMAC;
3066                 break;
3067         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3068                 authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3069                 authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3070                 break;
3071         case RTE_CRYPTO_AUTH_AES_CMAC:
3072                 authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3073                 authdata->algmode = OP_ALG_AAI_CMAC;
3074                 break;
3075         case RTE_CRYPTO_AUTH_NULL:
3076                 authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
3077                 break;
3078         case RTE_CRYPTO_AUTH_SHA224_HMAC:
3079         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3080         case RTE_CRYPTO_AUTH_SHA1:
3081         case RTE_CRYPTO_AUTH_SHA256:
3082         case RTE_CRYPTO_AUTH_SHA512:
3083         case RTE_CRYPTO_AUTH_SHA224:
3084         case RTE_CRYPTO_AUTH_SHA384:
3085         case RTE_CRYPTO_AUTH_MD5:
3086         case RTE_CRYPTO_AUTH_AES_GMAC:
3087         case RTE_CRYPTO_AUTH_KASUMI_F9:
3088         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
3089         case RTE_CRYPTO_AUTH_ZUC_EIA3:
3090                 DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3091                               session->auth_alg);
3092                 return -ENOTSUP;
3093         default:
3094                 DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
3095                               session->auth_alg);
3096                 return -ENOTSUP;
3097         }
3098         cipherdata->key = (size_t)session->cipher_key.data;
3099         cipherdata->keylen = session->cipher_key.length;
3100         cipherdata->key_enc_flags = 0;
3101         cipherdata->key_type = RTA_DATA_IMM;
3102
3103         switch (session->cipher_alg) {
3104         case RTE_CRYPTO_CIPHER_AES_CBC:
3105                 cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3106                 cipherdata->algmode = OP_ALG_AAI_CBC;
3107                 break;
3108         case RTE_CRYPTO_CIPHER_3DES_CBC:
3109                 cipherdata->algtype = OP_PCL_IPSEC_3DES;
3110                 cipherdata->algmode = OP_ALG_AAI_CBC;
3111                 break;
3112         case RTE_CRYPTO_CIPHER_DES_CBC:
3113                 cipherdata->algtype = OP_PCL_IPSEC_DES;
3114                 cipherdata->algmode = OP_ALG_AAI_CBC;
3115                 break;
3116         case RTE_CRYPTO_CIPHER_AES_CTR:
3117                 cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3118                 cipherdata->algmode = OP_ALG_AAI_CTR;
3119                 break;
3120         case RTE_CRYPTO_CIPHER_NULL:
3121                 cipherdata->algtype = OP_PCL_IPSEC_NULL;
3122                 break;
3123         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3124         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3125         case RTE_CRYPTO_CIPHER_3DES_ECB:
3126         case RTE_CRYPTO_CIPHER_3DES_CTR:
3127         case RTE_CRYPTO_CIPHER_AES_ECB:
3128         case RTE_CRYPTO_CIPHER_KASUMI_F8:
3129                 DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
3130                               session->cipher_alg);
3131                 return -ENOTSUP;
3132         default:
3133                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3134                               session->cipher_alg);
3135                 return -ENOTSUP;
3136         }
3137
3138         return 0;
3139 }
3140
3141 static int
3142 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3143                             struct rte_security_session_conf *conf,
3144                             void *sess)
3145 {
3146         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3147         struct rte_crypto_cipher_xform *cipher_xform = NULL;
3148         struct rte_crypto_auth_xform *auth_xform = NULL;
3149         struct rte_crypto_aead_xform *aead_xform = NULL;
3150         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3151         struct ctxt_priv *priv;
3152         struct alginfo authdata, cipherdata;
3153         int bufsize;
3154         struct sec_flow_context *flc;
3155         int ret = -1;
3156
3157         PMD_INIT_FUNC_TRACE();
3158
3159         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3160                                 sizeof(struct ctxt_priv) +
3161                                 sizeof(struct sec_flc_desc),
3162                                 RTE_CACHE_LINE_SIZE);
3163
3164         if (priv == NULL) {
3165                 DPAA2_SEC_ERR("No memory for priv CTXT");
3166                 return -ENOMEM;
3167         }
3168
3169         flc = &priv->flc_desc[0].flc;
3170
3171         if (ipsec_xform->life.bytes_hard_limit != 0 ||
3172             ipsec_xform->life.bytes_soft_limit != 0 ||
3173             ipsec_xform->life.packets_hard_limit != 0 ||
3174             ipsec_xform->life.packets_soft_limit != 0)
3175                 return -ENOTSUP;
3176
3177         memset(session, 0, sizeof(dpaa2_sec_session));
3178
3179         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3180                 cipher_xform = &conf->crypto_xform->cipher;
3181                 if (conf->crypto_xform->next)
3182                         auth_xform = &conf->crypto_xform->next->auth;
3183                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3184                                         session, &cipherdata, &authdata);
3185         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3186                 auth_xform = &conf->crypto_xform->auth;
3187                 if (conf->crypto_xform->next)
3188                         cipher_xform = &conf->crypto_xform->next->cipher;
3189                 ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3190                                         session, &cipherdata, &authdata);
3191         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3192                 aead_xform = &conf->crypto_xform->aead;
3193                 ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3194                                         session, &cipherdata);
3195                 authdata.keylen = 0;
3196                 authdata.algtype = 0;
3197         } else {
3198                 DPAA2_SEC_ERR("XFORM not specified");
3199                 ret = -EINVAL;
3200                 goto out;
3201         }
3202         if (ret) {
3203                 DPAA2_SEC_ERR("Failed to process xform");
3204                 goto out;
3205         }
3206
3207         session->ctxt_type = DPAA2_SEC_IPSEC;
3208         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3209                 uint8_t *hdr = NULL;
3210                 struct ip ip4_hdr;
3211                 struct rte_ipv6_hdr ip6_hdr;
3212                 struct ipsec_encap_pdb encap_pdb;
3213
3214                 flc->dhr = SEC_FLC_DHR_OUTBOUND;
3215                 /* For Sec Proto only one descriptor is required. */
3216                 memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3217
3218                 /* copy algo specific data to PDB */
3219                 switch (cipherdata.algtype) {
3220                 case OP_PCL_IPSEC_AES_CTR:
3221                         encap_pdb.ctr.ctr_initial = 0x00000001;
3222                         encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3223                         break;
3224                 case OP_PCL_IPSEC_AES_GCM8:
3225                 case OP_PCL_IPSEC_AES_GCM12:
3226                 case OP_PCL_IPSEC_AES_GCM16:
3227                         memcpy(encap_pdb.gcm.salt,
3228                                 (uint8_t *)&(ipsec_xform->salt), 4);
3229                         break;
3230                 }
3231
3232                 encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3233                         PDBOPTS_ESP_OIHI_PDB_INL |
3234                         PDBOPTS_ESP_IVSRC |
3235                         PDBHMO_ESP_SNR;
3236                 if (ipsec_xform->options.dec_ttl)
3237                         encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3238                 if (ipsec_xform->options.esn)
3239                         encap_pdb.options |= PDBOPTS_ESP_ESN;
3240                 encap_pdb.spi = ipsec_xform->spi;
3241                 session->dir = DIR_ENC;
3242                 if (ipsec_xform->tunnel.type ==
3243                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3244                         encap_pdb.ip_hdr_len = sizeof(struct ip);
3245                         ip4_hdr.ip_v = IPVERSION;
3246                         ip4_hdr.ip_hl = 5;
3247                         ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
3248                         ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3249                         ip4_hdr.ip_id = 0;
3250                         ip4_hdr.ip_off = 0;
3251                         ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3252                         ip4_hdr.ip_p = IPPROTO_ESP;
3253                         ip4_hdr.ip_sum = 0;
3254                         ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
3255                         ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
3256                         ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
3257                                         &ip4_hdr, sizeof(struct ip));
3258                         hdr = (uint8_t *)&ip4_hdr;
3259                 } else if (ipsec_xform->tunnel.type ==
3260                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3261                         ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3262                                 DPAA2_IPv6_DEFAULT_VTC_FLOW |
3263                                 ((ipsec_xform->tunnel.ipv6.dscp <<
3264                                         RTE_IPV6_HDR_TC_SHIFT) &
3265                                         RTE_IPV6_HDR_TC_MASK) |
3266                                 ((ipsec_xform->tunnel.ipv6.flabel <<
3267                                         RTE_IPV6_HDR_FL_SHIFT) &
3268                                         RTE_IPV6_HDR_FL_MASK));
3269                         /* Payload length will be updated by HW */
3270                         ip6_hdr.payload_len = 0;
3271                         ip6_hdr.hop_limits =
3272                                         ipsec_xform->tunnel.ipv6.hlimit;
3273                         ip6_hdr.proto = (ipsec_xform->proto ==
3274                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3275                                         IPPROTO_ESP : IPPROTO_AH;
3276                         memcpy(&ip6_hdr.src_addr,
3277                                 &ipsec_xform->tunnel.ipv6.src_addr, 16);
3278                         memcpy(&ip6_hdr.dst_addr,
3279                                 &ipsec_xform->tunnel.ipv6.dst_addr, 16);
3280                         encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3281                         hdr = (uint8_t *)&ip6_hdr;
3282                 }
3283
3284                 bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3285                                 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3286                                 SHR_WAIT : SHR_SERIAL, &encap_pdb,
3287                                 hdr, &cipherdata, &authdata);
3288         } else if (ipsec_xform->direction ==
3289                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3290                 struct ipsec_decap_pdb decap_pdb;
3291
3292                 flc->dhr = SEC_FLC_DHR_INBOUND;
3293                 memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3294                 /* copy algo specific data to PDB */
3295                 switch (cipherdata.algtype) {
3296                 case OP_PCL_IPSEC_AES_CTR:
3297                         decap_pdb.ctr.ctr_initial = 0x00000001;
3298                         decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3299                         break;
3300                 case OP_PCL_IPSEC_AES_GCM8:
3301                 case OP_PCL_IPSEC_AES_GCM12:
3302                 case OP_PCL_IPSEC_AES_GCM16:
3303                         memcpy(decap_pdb.gcm.salt,
3304                                 (uint8_t *)&(ipsec_xform->salt), 4);
3305                         break;
3306                 }
3307
3308                 decap_pdb.options = (ipsec_xform->tunnel.type ==
3309                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3310                                 sizeof(struct ip) << 16 :
3311                                 sizeof(struct rte_ipv6_hdr) << 16;
3312                 if (ipsec_xform->options.esn)
3313                         decap_pdb.options |= PDBOPTS_ESP_ESN;
3314
3315                 if (ipsec_xform->replay_win_sz) {
3316                         uint32_t win_sz;
3317                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3318
3319                         if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3320                                 DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
3321                                 win_sz = 128;
3322                         }
3323                         switch (win_sz) {
3324                         case 1:
3325                         case 2:
3326                         case 4:
3327                         case 8:
3328                         case 16:
3329                         case 32:
3330                                 decap_pdb.options |= PDBOPTS_ESP_ARS32;
3331                                 break;
3332                         case 64:
3333                                 decap_pdb.options |= PDBOPTS_ESP_ARS64;
3334                                 break;
3335                         case 256:
3336                                 decap_pdb.options |= PDBOPTS_ESP_ARS256;
3337                                 break;
3338                         case 512:
3339                                 decap_pdb.options |= PDBOPTS_ESP_ARS512;
3340                                 break;
3341                         case 1024:
3342                                 decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3343                                 break;
3344                         case 128:
3345                         default:
3346                                 decap_pdb.options |= PDBOPTS_ESP_ARS128;
3347                         }
3348                 }
3349                 session->dir = DIR_DEC;
3350                 bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3351                                 1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3352                                 SHR_WAIT : SHR_SERIAL,
3353                                 &decap_pdb, &cipherdata, &authdata);
3354         } else
3355                 goto out;
3356
3357         if (bufsize < 0) {
3358                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3359                 goto out;
3360         }
3361
3362         flc->word1_sdl = (uint8_t)bufsize;
3363
3364         /* Enable the stashing control bit */
3365         DPAA2_SET_FLC_RSC(flc);
3366         flc->word2_rflc_31_0 = lower_32_bits(
3367                         (size_t)&(((struct dpaa2_sec_qp *)
3368                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
3369         flc->word3_rflc_63_32 = upper_32_bits(
3370                         (size_t)&(((struct dpaa2_sec_qp *)
3371                         dev->data->queue_pairs[0])->rx_vq));
3372
3373         /* Set EWS bit i.e. enable write-safe */
3374         DPAA2_SET_FLC_EWS(flc);
3375         /* Set BS = 1 i.e reuse input buffers as output buffers */
3376         DPAA2_SET_FLC_REUSE_BS(flc);
3377         /* Set FF = 10; reuse input buffers if they provide sufficient space */
3378         DPAA2_SET_FLC_REUSE_FF(flc);
3379
3380         session->ctxt = priv;
3381
3382         return 0;
3383 out:
3384         rte_free(session->auth_key.data);
3385         rte_free(session->cipher_key.data);
3386         rte_free(priv);
3387         return ret;
3388 }
3389
3390 static int
3391 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3392                            struct rte_security_session_conf *conf,
3393                            void *sess)
3394 {
3395         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3396         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3397         struct rte_crypto_auth_xform *auth_xform = NULL;
3398         struct rte_crypto_cipher_xform *cipher_xform = NULL;
3399         dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3400         struct ctxt_priv *priv;
3401         struct alginfo authdata, cipherdata;
3402         struct alginfo *p_authdata = NULL;
3403         int bufsize = -1;
3404         struct sec_flow_context *flc;
3405 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3406         int swap = true;
3407 #else
3408         int swap = false;
3409 #endif
3410
3411         PMD_INIT_FUNC_TRACE();
3412
3413         memset(session, 0, sizeof(dpaa2_sec_session));
3414
3415         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3416                                 sizeof(struct ctxt_priv) +
3417                                 sizeof(struct sec_flc_desc),
3418                                 RTE_CACHE_LINE_SIZE);
3419
3420         if (priv == NULL) {
3421                 DPAA2_SEC_ERR("No memory for priv CTXT");
3422                 return -ENOMEM;
3423         }
3424
3425         flc = &priv->flc_desc[0].flc;
3426
3427         /* find xfrm types */
3428         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3429                 cipher_xform = &xform->cipher;
3430                 if (xform->next != NULL &&
3431                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3432                         session->ext_params.aead_ctxt.auth_cipher_text = true;
3433                         auth_xform = &xform->next->auth;
3434                 }
3435         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3436                 auth_xform = &xform->auth;
3437                 if (xform->next != NULL &&
3438                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3439                         session->ext_params.aead_ctxt.auth_cipher_text = false;
3440                         cipher_xform = &xform->next->cipher;
3441                 }
3442         } else {
3443                 DPAA2_SEC_ERR("Invalid crypto type");
3444                 return -EINVAL;
3445         }
3446
3447         session->ctxt_type = DPAA2_SEC_PDCP;
3448         if (cipher_xform) {
3449                 session->cipher_key.data = rte_zmalloc(NULL,
3450                                                cipher_xform->key.length,
3451                                                RTE_CACHE_LINE_SIZE);
3452                 if (session->cipher_key.data == NULL &&
3453                                 cipher_xform->key.length > 0) {
3454                         DPAA2_SEC_ERR("No Memory for cipher key");
3455                         rte_free(priv);
3456                         return -ENOMEM;
3457                 }
3458                 session->cipher_key.length = cipher_xform->key.length;
3459                 memcpy(session->cipher_key.data, cipher_xform->key.data,
3460                         cipher_xform->key.length);
3461                 session->dir =
3462                         (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3463                                         DIR_ENC : DIR_DEC;
3464                 session->cipher_alg = cipher_xform->algo;
3465         } else {
3466                 session->cipher_key.data = NULL;
3467                 session->cipher_key.length = 0;
3468                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3469                 session->dir = DIR_ENC;
3470         }
3471
3472         session->pdcp.domain = pdcp_xform->domain;
3473         session->pdcp.bearer = pdcp_xform->bearer;
3474         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3475         session->pdcp.sn_size = pdcp_xform->sn_size;
3476         session->pdcp.hfn = pdcp_xform->hfn;
3477         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3478         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3479         /* hfv ovd offset location is stored in iv.offset value*/
3480         if (cipher_xform)
3481                 session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3482
3483         cipherdata.key = (size_t)session->cipher_key.data;
3484         cipherdata.keylen = session->cipher_key.length;
3485         cipherdata.key_enc_flags = 0;
3486         cipherdata.key_type = RTA_DATA_IMM;
3487
3488         switch (session->cipher_alg) {
3489         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3490                 cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3491                 break;
3492         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3493                 cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3494                 break;
3495         case RTE_CRYPTO_CIPHER_AES_CTR:
3496                 cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3497                 break;
3498         case RTE_CRYPTO_CIPHER_NULL:
3499                 cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3500                 break;
3501         default:
3502                 DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3503                               session->cipher_alg);
3504                 goto out;
3505         }
3506
3507         if (auth_xform) {
3508                 session->auth_key.data = rte_zmalloc(NULL,
3509                                                      auth_xform->key.length,
3510                                                      RTE_CACHE_LINE_SIZE);
3511                 if (!session->auth_key.data &&
3512                     auth_xform->key.length > 0) {
3513                         DPAA2_SEC_ERR("No Memory for auth key");
3514                         rte_free(session->cipher_key.data);
3515                         rte_free(priv);
3516                         return -ENOMEM;
3517                 }
3518                 session->auth_key.length = auth_xform->key.length;
3519                 memcpy(session->auth_key.data, auth_xform->key.data,
3520                        auth_xform->key.length);
3521                 session->auth_alg = auth_xform->algo;
3522         } else {
3523                 session->auth_key.data = NULL;
3524                 session->auth_key.length = 0;
3525                 session->auth_alg = 0;
3526         }
3527         authdata.key = (size_t)session->auth_key.data;
3528         authdata.keylen = session->auth_key.length;
3529         authdata.key_enc_flags = 0;
3530         authdata.key_type = RTA_DATA_IMM;
3531
3532         if (session->auth_alg) {
3533                 switch (session->auth_alg) {
3534                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3535                         authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3536                         break;
3537                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
3538                         authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3539                         break;
3540                 case RTE_CRYPTO_AUTH_AES_CMAC:
3541                         authdata.algtype = PDCP_AUTH_TYPE_AES;
3542                         break;
3543                 case RTE_CRYPTO_AUTH_NULL:
3544                         authdata.algtype = PDCP_AUTH_TYPE_NULL;
3545                         break;
3546                 default:
3547                         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3548                                       session->auth_alg);
3549                         goto out;
3550                 }
3551
3552                 p_authdata = &authdata;
3553         } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3554                 DPAA2_SEC_ERR("Crypto: Integrity must for c-plane");
3555                 goto out;
3556         }
3557
3558         if (pdcp_xform->sdap_enabled) {
3559                 int nb_keys_to_inline =
3560                         rta_inline_pdcp_sdap_query(authdata.algtype,
3561                                         cipherdata.algtype,
3562                                         session->pdcp.sn_size,
3563                                         session->pdcp.hfn_ovd);
3564                 if (nb_keys_to_inline >= 1) {
3565                         cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3566                         cipherdata.key_type = RTA_DATA_PTR;
3567                 }
3568                 if (nb_keys_to_inline >= 2) {
3569                         authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3570                         authdata.key_type = RTA_DATA_PTR;
3571                 }
3572         } else {
3573                 if (rta_inline_pdcp_query(authdata.algtype,
3574                                         cipherdata.algtype,
3575                                         session->pdcp.sn_size,
3576                                         session->pdcp.hfn_ovd)) {
3577                         cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3578                         cipherdata.key_type = RTA_DATA_PTR;
3579                 }
3580         }
3581
3582         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3583                 if (session->dir == DIR_ENC)
3584                         bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3585                                         priv->flc_desc[0].desc, 1, swap,
3586                                         pdcp_xform->hfn,
3587                                         session->pdcp.sn_size,
3588                                         pdcp_xform->bearer,
3589                                         pdcp_xform->pkt_dir,
3590                                         pdcp_xform->hfn_threshold,
3591                                         &cipherdata, &authdata);
3592                 else if (session->dir == DIR_DEC)
3593                         bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3594                                         priv->flc_desc[0].desc, 1, swap,
3595                                         pdcp_xform->hfn,
3596                                         session->pdcp.sn_size,
3597                                         pdcp_xform->bearer,
3598                                         pdcp_xform->pkt_dir,
3599                                         pdcp_xform->hfn_threshold,
3600                                         &cipherdata, &authdata);
3601
3602         } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3603                 bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3604                                                      1, swap, &authdata);
3605         } else {
3606                 if (session->dir == DIR_ENC) {
3607                         if (pdcp_xform->sdap_enabled)
3608                                 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3609                                         priv->flc_desc[0].desc, 1, swap,
3610                                         session->pdcp.sn_size,
3611                                         pdcp_xform->hfn,
3612                                         pdcp_xform->bearer,
3613                                         pdcp_xform->pkt_dir,
3614                                         pdcp_xform->hfn_threshold,
3615                                         &cipherdata, p_authdata);
3616                         else
3617                                 bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3618                                         priv->flc_desc[0].desc, 1, swap,
3619                                         session->pdcp.sn_size,
3620                                         pdcp_xform->hfn,
3621                                         pdcp_xform->bearer,
3622                                         pdcp_xform->pkt_dir,
3623                                         pdcp_xform->hfn_threshold,
3624                                         &cipherdata, p_authdata);
3625                 } else if (session->dir == DIR_DEC) {
3626                         if (pdcp_xform->sdap_enabled)
3627                                 bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3628                                         priv->flc_desc[0].desc, 1, swap,
3629                                         session->pdcp.sn_size,
3630                                         pdcp_xform->hfn,
3631                                         pdcp_xform->bearer,
3632                                         pdcp_xform->pkt_dir,
3633                                         pdcp_xform->hfn_threshold,
3634                                         &cipherdata, p_authdata);
3635                         else
3636                                 bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3637                                         priv->flc_desc[0].desc, 1, swap,
3638                                         session->pdcp.sn_size,
3639                                         pdcp_xform->hfn,
3640                                         pdcp_xform->bearer,
3641                                         pdcp_xform->pkt_dir,
3642                                         pdcp_xform->hfn_threshold,
3643                                         &cipherdata, p_authdata);
3644                 }
3645         }
3646
3647         if (bufsize < 0) {
3648                 DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3649                 goto out;
3650         }
3651
3652         /* Enable the stashing control bit */
3653         DPAA2_SET_FLC_RSC(flc);
3654         flc->word2_rflc_31_0 = lower_32_bits(
3655                         (size_t)&(((struct dpaa2_sec_qp *)
3656                         dev->data->queue_pairs[0])->rx_vq) | 0x14);
3657         flc->word3_rflc_63_32 = upper_32_bits(
3658                         (size_t)&(((struct dpaa2_sec_qp *)
3659                         dev->data->queue_pairs[0])->rx_vq));
3660
3661         flc->word1_sdl = (uint8_t)bufsize;
3662
3663         /* TODO - check the perf impact or
3664          * align as per descriptor type
3665          * Set EWS bit i.e. enable write-safe
3666          * DPAA2_SET_FLC_EWS(flc);
3667          */
3668
3669         /* Set BS = 1 i.e reuse input buffers as output buffers */
3670         DPAA2_SET_FLC_REUSE_BS(flc);
3671         /* Set FF = 10; reuse input buffers if they provide sufficient space */
3672         DPAA2_SET_FLC_REUSE_FF(flc);
3673
3674         session->ctxt = priv;
3675
3676         return 0;
3677 out:
3678         rte_free(session->auth_key.data);
3679         rte_free(session->cipher_key.data);
3680         rte_free(priv);
3681         return -EINVAL;
3682 }
3683
3684 static int
3685 dpaa2_sec_security_session_create(void *dev,
3686                                   struct rte_security_session_conf *conf,
3687                                   struct rte_security_session *sess,
3688                                   struct rte_mempool *mempool)
3689 {
3690         void *sess_private_data;
3691         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3692         int ret;
3693
3694         if (rte_mempool_get(mempool, &sess_private_data)) {
3695                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3696                 return -ENOMEM;
3697         }
3698
3699         switch (conf->protocol) {
3700         case RTE_SECURITY_PROTOCOL_IPSEC:
3701                 ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3702                                 sess_private_data);
3703                 break;
3704         case RTE_SECURITY_PROTOCOL_MACSEC:
3705                 return -ENOTSUP;
3706         case RTE_SECURITY_PROTOCOL_PDCP:
3707                 ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3708                                 sess_private_data);
3709                 break;
3710         default:
3711                 return -EINVAL;
3712         }
3713         if (ret != 0) {
3714                 DPAA2_SEC_ERR("Failed to configure session parameters");
3715                 /* Return session to mempool */
3716                 rte_mempool_put(mempool, sess_private_data);
3717                 return ret;
3718         }
3719
3720         set_sec_session_private_data(sess, sess_private_data);
3721
3722         return ret;
3723 }
3724
3725 /** Clear the memory of session so it doesn't leave key material behind */
3726 static int
3727 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3728                 struct rte_security_session *sess)
3729 {
3730         PMD_INIT_FUNC_TRACE();
3731         void *sess_priv = get_sec_session_private_data(sess);
3732
3733         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3734
3735         if (sess_priv) {
3736                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3737
3738                 rte_free(s->ctxt);
3739                 rte_free(s->cipher_key.data);
3740                 rte_free(s->auth_key.data);
3741                 memset(s, 0, sizeof(dpaa2_sec_session));
3742                 set_sec_session_private_data(sess, NULL);
3743                 rte_mempool_put(sess_mp, sess_priv);
3744         }
3745         return 0;
3746 }
3747 #endif
3748 static int
3749 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3750                 struct rte_crypto_sym_xform *xform,
3751                 struct rte_cryptodev_sym_session *sess,
3752                 struct rte_mempool *mempool)
3753 {
3754         void *sess_private_data;
3755         int ret;
3756
3757         if (rte_mempool_get(mempool, &sess_private_data)) {
3758                 DPAA2_SEC_ERR("Couldn't get object from session mempool");
3759                 return -ENOMEM;
3760         }
3761
3762         ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
3763         if (ret != 0) {
3764                 DPAA2_SEC_ERR("Failed to configure session parameters");
3765                 /* Return session to mempool */
3766                 rte_mempool_put(mempool, sess_private_data);
3767                 return ret;
3768         }
3769
3770         set_sym_session_private_data(sess, dev->driver_id,
3771                 sess_private_data);
3772
3773         return 0;
3774 }
3775
3776 /** Clear the memory of session so it doesn't leave key material behind */
3777 static void
3778 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3779                 struct rte_cryptodev_sym_session *sess)
3780 {
3781         PMD_INIT_FUNC_TRACE();
3782         uint8_t index = dev->driver_id;
3783         void *sess_priv = get_sym_session_private_data(sess, index);
3784         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3785
3786         if (sess_priv) {
3787                 rte_free(s->ctxt);
3788                 rte_free(s->cipher_key.data);
3789                 rte_free(s->auth_key.data);
3790                 memset(s, 0, sizeof(dpaa2_sec_session));
3791                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3792                 set_sym_session_private_data(sess, index, NULL);
3793                 rte_mempool_put(sess_mp, sess_priv);
3794         }
3795 }
3796
3797 static int
3798 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3799                         struct rte_cryptodev_config *config __rte_unused)
3800 {
3801         PMD_INIT_FUNC_TRACE();
3802
3803         return 0;
3804 }
3805
/*
 * Start the DPSECI device.
 *
 * Enables the DPSECI object through the MC, reads its attributes, and
 * caches the hardware frame-queue ids (fqid) of every configured queue
 * pair into the per-queue software state.  On any MC failure the device
 * is disabled again and -1 is returned; returns 0 on success.
 */
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
	/* Cache Rx fqids; iteration stops at the first unset queue pair */
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	/* Cache Tx fqids the same way */
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	/* Roll back the enable so the device is left stopped */
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
3858
3859 static void
3860 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3861 {
3862         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3863         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3864         int ret;
3865
3866         PMD_INIT_FUNC_TRACE();
3867
3868         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3869         if (ret) {
3870                 DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
3871                              priv->hw_id);
3872                 return;
3873         }
3874
3875         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3876         if (ret < 0) {
3877                 DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
3878                 return;
3879         }
3880 }
3881
3882 static int
3883 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3884 {
3885         PMD_INIT_FUNC_TRACE();
3886
3887         return 0;
3888 }
3889
3890 static void
3891 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3892                         struct rte_cryptodev_info *info)
3893 {
3894         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3895
3896         PMD_INIT_FUNC_TRACE();
3897         if (info != NULL) {
3898                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3899                 info->feature_flags = dev->feature_flags;
3900                 info->capabilities = dpaa2_sec_capabilities;
3901                 /* No limit of number of sessions */
3902                 info->sym.max_nb_sessions = 0;
3903                 info->driver_id = cryptodev_driver_id;
3904         }
3905 }
3906
/*
 * Aggregate software per-queue-pair counters into @stats and log the
 * hardware SEC accelerator counters read from the DPSECI object.
 * Queue pairs that were never set up are skipped.
 */
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io dpseci;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp == NULL || qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		/* Software counters maintained by the enq/deq burst paths */
		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	/* In case as secondary process access stats, MCP portal in priv-hw
	 * may have primary process address. Need the secondary process
	 * based MCP portal address for this object.
	 */
	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		/* Hardware counters are informational only — they are
		 * logged rather than folded into @stats.
		 */
		DPAA2_SEC_INFO("dpseci hardware stats:"
			    "\n\tNum of Requests Dequeued = %" PRIu64
			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
3962
3963 static
3964 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3965 {
3966         int i;
3967         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3968                                    (dev->data->queue_pairs);
3969
3970         PMD_INIT_FUNC_TRACE();
3971
3972         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3973                 if (qp[i] == NULL) {
3974                         DPAA2_SEC_DEBUG("Uninitialised queue pair");
3975                         continue;
3976                 }
3977                 qp[i]->tx_vq.rx_pkts = 0;
3978                 qp[i]->tx_vq.tx_pkts = 0;
3979                 qp[i]->tx_vq.err_pkts = 0;
3980                 qp[i]->rx_vq.rx_pkts = 0;
3981                 qp[i]->rx_vq.tx_pkts = 0;
3982                 qp[i]->rx_vq.err_pkts = 0;
3983         }
3984 }
3985
/*
 * Event-mode Rx callback for RTE_SCHED_TYPE_PARALLEL.
 * Converts the received frame descriptor to a crypto op, fills the
 * rte_event from the queue's stored event template, and consumes the
 * DQRR entry immediately (no atomic/ordered context is held).
 */
static void __rte_hot
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	struct dpaa2_sec_qp *qp;
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Recover the owning queue pair from the embedded rx_vq */
	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	ev->event_ptr = sec_fd_to_mbuf(fd, qp);

	qbman_swp_dqrr_consume(swp, dq);
}
/*
 * Event-mode Rx callback for RTE_SCHED_TYPE_ATOMIC.
 * Unlike the parallel variant, the DQRR entry is NOT consumed here:
 * its index is recorded in the source mbuf's seqn (with
 * QBMAN_ENQUEUE_FLAG_DCA) and in the per-lcore DQRR-held bookkeeping,
 * so the atomic context is released later via discrete consume.
 */
static void
dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	uint8_t dqrr_index;
	struct dpaa2_sec_qp *qp;
	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Recover the owning queue pair from the embedded rx_vq */
	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->event_ptr = sec_fd_to_mbuf(fd, qp);
	/* Hold the DQRR entry: stash its index for deferred consumption */
	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
}
4046
/*
 * Event-mode Rx callback for RTE_SCHED_TYPE_ORDERED.
 * Encodes the order-restoration point id (ODPID) and sequence number
 * from the dequeue result into the source mbuf's seqn so the later
 * enqueue can restore ordering, then consumes the DQRR entry.
 */
static void __rte_hot
dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
	struct dpaa2_sec_qp *qp;

	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	/* Recover the owning queue pair from the embedded rx_vq */
	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	ev->event_ptr = sec_fd_to_mbuf(fd, qp);

	/* Pack ORP flag + odpid + seqnum into the mbuf's dpaa2 seqn */
	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
		DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
		DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}
4082
/*
 * Attach queue pair @qp_id to an event device channel (DPCON) so that
 * crypto completions are delivered as rte_events.
 *
 * Selects the Rx callback matching the event schedule type, maps the
 * event priority onto the DPCON priority range, programs the DPSECI Rx
 * queue destination via the MC, and (for ordered scheduling) creates an
 * order-restoration point.  Returns 0 on success, -EINVAL for an
 * unsupported schedule type, or the MC error code.
 */
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		struct dpaa2_dpcon_dev *dpcon,
		const struct rte_event *event)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct dpseci_rx_queue_cfg cfg;
	uint8_t priority;
	int ret;

	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
	else
		return -EINVAL;

	/* Scale eventdev priority (0..255) onto the DPCON priority range.
	 * NOTE(review): divides by event->priority — a priority of 0
	 * (highest) would divide by zero; confirm callers never pass 0.
	 */
	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
		   (dpcon->num_priorities - 1);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
	cfg.dest_cfg.priority = priority;

	/* Deliver the qp pointer back in the dequeue context */
	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(qp);
	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
		cfg.order_preservation_en = 1;
	}

	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
		/* NOTE(review): ocfg is not zero-initialized; fields other
		 * than those assigned below keep stack garbage — confirm
		 * every field dpseci_set_opr() reads is set here.
		 */
		struct opr_cfg ocfg;

		/* Restoration window size = 256 frames */
		ocfg.oprrws = 3;
		/* Restoration window size = 512 frames for LX2 */
		if (dpaa2_svr_family == SVR_LX2160A)
			ocfg.oprrws = 4;
		/* Auto advance NESN window enabled */
		ocfg.oa = 1;
		/* Late arrival window size disabled */
		ocfg.olws = 0;
		/* ORL resource exhaustaion advance NESN disabled */
		ocfg.oeane = 0;

		if (priv->en_loose_ordered)
			ocfg.oloe = 1;
		else
			ocfg.oloe = 0;

		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
				   qp_id, OPR_OPT_CREATE, &ocfg);
		if (ret) {
			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
			return ret;
		}
		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
		priv->en_ordered = 1;
	}

	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
		return ret;
	}

	/* Remember the event template; Rx callbacks copy it per packet */
	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));

	return 0;
}
4162
4163 int
4164 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4165                         int qp_id)
4166 {
4167         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4168         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4169         struct dpseci_rx_queue_cfg cfg;
4170         int ret;
4171
4172         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4173         cfg.options = DPSECI_QUEUE_OPT_DEST;
4174         cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4175
4176         ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4177                                   qp_id, &cfg);
4178         if (ret)
4179                 RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4180
4181         return ret;
4182 }
4183
/* Cryptodev operations table exported by the DPAA2 SEC PMD */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get	      = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
	.sym_session_configure    = dpaa2_sec_sym_session_configure,
	.sym_session_clear        = dpaa2_sec_sym_session_clear,
	/* Raw data-path API related operations */
	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
};
4201
4202 #ifdef RTE_LIB_SECURITY
4203 static const struct rte_security_capability *
4204 dpaa2_sec_capabilities_get(void *device __rte_unused)
4205 {
4206         return dpaa2_sec_security_cap;
4207 }
4208
/* rte_security operations table; unimplemented hooks are left NULL */
static const struct rte_security_ops dpaa2_sec_security_ops = {
	.session_create = dpaa2_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa2_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa2_sec_capabilities_get
};
4217 #endif
4218
4219 static int
4220 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4221 {
4222         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4223         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4224         int ret;
4225
4226         PMD_INIT_FUNC_TRACE();
4227
4228         /* Function is reverse of dpaa2_sec_dev_init.
4229          * It does the following:
4230          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
4231          * 2. Close the DPSECI device
4232          * 3. Free the allocated resources.
4233          */
4234
4235         /*Close the device at underlying layer*/
4236         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4237         if (ret) {
4238                 DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4239                 return -1;
4240         }
4241
4242         /*Free the allocated memory for ethernet private data and dpseci*/
4243         priv->hw = NULL;
4244         rte_free(dpseci);
4245         rte_free(dev->security_ctx);
4246
4247         DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4248                        dev->data->name, rte_socket_id());
4249
4250         return 0;
4251 }
4252
4253 static int
4254 check_devargs_handler(const char *key, const char *value,
4255                       void *opaque)
4256 {
4257         struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4258         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4259
4260         if (!strcmp(key, "drv_strict_order")) {
4261                 priv->en_loose_ordered = false;
4262         } else if (!strcmp(key, "drv_dump_mode")) {
4263                 dpaa2_sec_dp_dump = atoi(value);
4264                 if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4265                         DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
4266                                       "supported, changing to FULL error"
4267                                       " prints\n");
4268                         dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4269                 }
4270         } else
4271                 return -1;
4272
4273         return 0;
4274 }
4275
4276 static void
4277 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4278 {
4279         struct rte_kvargs *kvlist;
4280         struct rte_devargs *devargs;
4281
4282         devargs = cryptodev->device->devargs;
4283         if (!devargs)
4284                 return;
4285
4286         kvlist = rte_kvargs_parse(devargs->args, NULL);
4287         if (!kvlist)
4288                 return;
4289
4290         if (!rte_kvargs_count(kvlist, key)) {
4291                 rte_kvargs_free(kvlist);
4292                 return;
4293         }
4294
4295         rte_kvargs_process(kvlist, key,
4296                         check_devargs_handler, (void *)cryptodev);
4297         rte_kvargs_free(kvlist);
4298 }
4299
4300 static int
4301 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4302 {
4303         struct dpaa2_sec_dev_private *internals;
4304         struct rte_device *dev = cryptodev->device;
4305         struct rte_dpaa2_device *dpaa2_dev;
4306 #ifdef RTE_LIB_SECURITY
4307         struct rte_security_ctx *security_instance;
4308 #endif
4309         struct fsl_mc_io *dpseci;
4310         uint16_t token;
4311         struct dpseci_attr attr;
4312         int retcode, hw_id;
4313
4314         PMD_INIT_FUNC_TRACE();
4315         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4316         hw_id = dpaa2_dev->object_id;
4317
4318         cryptodev->driver_id = cryptodev_driver_id;
4319         cryptodev->dev_ops = &crypto_ops;
4320
4321         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4322         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4323         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4324                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
4325                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4326                         RTE_CRYPTODEV_FF_SECURITY |
4327                         RTE_CRYPTODEV_FF_SYM_RAW_DP |
4328                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4329                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4330                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4331                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4332                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4333
4334         internals = cryptodev->data->dev_private;
4335
4336         /*
4337          * For secondary processes, we don't initialise any further as primary
4338          * has already done this work. Only check we don't need a different
4339          * RX function
4340          */
4341         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4342                 DPAA2_SEC_DEBUG("Device already init by primary process");
4343                 return 0;
4344         }
4345 #ifdef RTE_LIB_SECURITY
4346         /* Initialize security_ctx only for primary process*/
4347         security_instance = rte_malloc("rte_security_instances_ops",
4348                                 sizeof(struct rte_security_ctx), 0);
4349         if (security_instance == NULL)
4350                 return -ENOMEM;
4351         security_instance->device = (void *)cryptodev;
4352         security_instance->ops = &dpaa2_sec_security_ops;
4353         security_instance->sess_cnt = 0;
4354         cryptodev->security_ctx = security_instance;
4355 #endif
4356         /*Open the rte device via MC and save the handle for further use*/
4357         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4358                                 sizeof(struct fsl_mc_io), 0);
4359         if (!dpseci) {
4360                 DPAA2_SEC_ERR(
4361                         "Error in allocating the memory for dpsec object");
4362                 return -ENOMEM;
4363         }
4364         dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4365
4366         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4367         if (retcode != 0) {
4368                 DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4369                               retcode);
4370                 goto init_error;
4371         }
4372         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4373         if (retcode != 0) {
4374                 DPAA2_SEC_ERR(
4375                              "Cannot get dpsec device attributed: Error = %x",
4376                              retcode);
4377                 goto init_error;
4378         }
4379         snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4380                         "dpsec-%u", hw_id);
4381
4382         internals->max_nb_queue_pairs = attr.num_tx_queues;
4383         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4384         internals->hw = dpseci;
4385         internals->token = token;
4386         internals->en_loose_ordered = true;
4387
4388         dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4389         dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4390         DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4391         return 0;
4392
4393 init_error:
4394         DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4395
4396         /* dpaa2_sec_uninit(crypto_dev_name); */
4397         return -EFAULT;
4398 }
4399
4400 static int
4401 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4402                           struct rte_dpaa2_device *dpaa2_dev)
4403 {
4404         struct rte_cryptodev *cryptodev;
4405         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4406
4407         int retval;
4408
4409         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4410                         dpaa2_dev->object_id);
4411
4412         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4413         if (cryptodev == NULL)
4414                 return -ENOMEM;
4415
4416         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4417                 cryptodev->data->dev_private = rte_zmalloc_socket(
4418                                         "cryptodev private structure",
4419                                         sizeof(struct dpaa2_sec_dev_private),
4420                                         RTE_CACHE_LINE_SIZE,
4421                                         rte_socket_id());
4422
4423                 if (cryptodev->data->dev_private == NULL)
4424                         rte_panic("Cannot allocate memzone for private "
4425                                   "device data");
4426         }
4427
4428         dpaa2_dev->cryptodev = cryptodev;
4429         cryptodev->device = &dpaa2_dev->device;
4430
4431         /* init user callbacks */
4432         TAILQ_INIT(&(cryptodev->link_intr_cbs));
4433
4434         if (dpaa2_svr_family == SVR_LX2160A)
4435                 rta_set_sec_era(RTA_SEC_ERA_10);
4436         else
4437                 rta_set_sec_era(RTA_SEC_ERA_8);
4438
4439         DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
4440
4441         /* Invoke PMD device initialization function */
4442         retval = dpaa2_sec_dev_init(cryptodev);
4443         if (retval == 0) {
4444                 rte_cryptodev_pmd_probing_finish(cryptodev);
4445                 return 0;
4446         }
4447
4448         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4449                 rte_free(cryptodev->data->dev_private);
4450
4451         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4452
4453         return -ENXIO;
4454 }
4455
4456 static int
4457 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4458 {
4459         struct rte_cryptodev *cryptodev;
4460         int ret;
4461
4462         cryptodev = dpaa2_dev->cryptodev;
4463         if (cryptodev == NULL)
4464                 return -ENODEV;
4465
4466         ret = dpaa2_sec_uninit(cryptodev);
4467         if (ret)
4468                 return ret;
4469
4470         return rte_cryptodev_pmd_destroy(cryptodev);
4471 }
4472
/* fslmc bus driver descriptor for the DPSECI crypto object.
 * IOVA-as-VA is required; probe/remove hook the PMD lifecycle above.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};
4482
static struct cryptodev_driver dpaa2_sec_crypto_drv;

/* Register the driver with the fslmc bus and with the cryptodev layer
 * (the latter assigns cryptodev_driver_id used in the fast path).
 */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
/* Accepted devargs: strict ordering enable and datapath dump level */
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
		DRIVER_STRICT_ORDER "=<int>"
		DRIVER_DUMP_MODE "=<int>");
RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);