cryptodev: add mempool pointer in queue pair setup
[dpdk.git] / drivers / crypto / dpaa2_sec / dpaa2_sec_dpseci.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright (c) 2016 NXP. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_cryptodev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_cryptodev_pmd.h>
46 #include <rte_common.h>
47 #include <rte_fslmc.h>
48 #include <fslmc_vfio.h>
49 #include <dpaa2_hw_pvt.h>
50 #include <dpaa2_hw_dpio.h>
51 #include <dpaa2_hw_mempool.h>
52 #include <fsl_dpseci.h>
53 #include <fsl_mc_sys.h>
54
55 #include "dpaa2_sec_priv.h"
56 #include "dpaa2_sec_logs.h"
57
58 /* RTA header files */
59 #include <hw/desc/ipsec.h>
60 #include <hw/desc/algo.h>
61
62 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
63  * a pointer to the shared descriptor
64  */
65 #define MIN_JOB_DESC_SIZE       (CAAM_CMD_SZ + CAAM_PTR_SZ)
66 #define FSL_VENDOR_ID           0x1957
67 #define FSL_DEVICE_ID           0x410
68 #define FSL_SUBSYSTEM_SEC       1
69 #define FSL_MC_DPSECI_DEVID     3
70
71 #define NO_PREFETCH 0
72 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
73 #define FLE_POOL_NUM_BUFS       32000
74 #define FLE_POOL_BUF_SIZE       256
75 #define FLE_POOL_CACHE_SIZE     512
76
77 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
78
79 static uint8_t cryptodev_driver_id;
80
/* Build a QBMAN compound frame descriptor for an AEAD operation
 * (combined cipher+auth, e.g. AES-GCM).
 *
 * A scratch buffer is taken from the session's fle_pool and laid out as:
 *   base+0: scratch FLE holding the op pointer and session ctxt (recovered
 *           by sec_fd_to_mbuf() on dequeue, which also frees the buffer)
 *   base+1: output FLE  -> SG list (data, plus digest when encrypting)
 *   base+2: input FLE   -> SG list (IV, optional AAD, data, plus received
 *           ICV copy when decrypting)
 *
 * Returns 0 on success, -1 if the fle_pool is exhausted.
 */
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* Scratch FLE: stash the op and session private ctxt for the
	 * dequeue path; never seen by SEC itself.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	/* fle -> output FLE, fle+1 -> input FLE, SGEs follow at fle+2 */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		/* Backed by a HW buffer pool: tag FD, FLEs and SGEs with it */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* No HW pool backing: mark the pool id invalid (IVP) */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sym_op->aead.digest.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output carries the generated ICV in addition to data */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		/* Second output SGE: destination for the generated digest */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally includes the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		/* AAD (authenticated-only bytes) precedes the payload */
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* Copy the received ICV into scratch space past the SGEs so
		 * SEC can check it; the original digest buffer is zeroed.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		memset(sym_op->aead.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		/* NOTE(review): presumably conveys AAD length to SEC via the
		 * internal job descriptor fields — confirm against hw docs.
		 */
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
223
/* Build a compound frame descriptor for a chained cipher+auth operation
 * (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH).
 *
 * Same fle_pool buffer layout as build_authenc_gcm_fd(): scratch FLE
 * (op + ctxt for the dequeue path), output FLE -> SG list, input FLE ->
 * SG list.  auth_only_len is the leading region that is authenticated
 * but not ciphered (auth length minus cipher length).
 *
 * Returns 0 on success, -1 if the fle_pool is exhausted.
 */
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* Scratch FLE: stash op and session ctxt for sec_fd_to_mbuf() */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	/* fle -> output FLE, fle+1 -> input FLE, SGEs follow at fle+2 */
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		/* Backed by a HW buffer pool: tag FD, FLEs and SGEs with it */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		/* No HW pool backing: mark the pool id invalid (IVP) */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	/* Encrypt output carries the generated ICV in addition to data */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		/* Second output SGE: destination for the generated digest */
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	/* Decrypt input additionally includes the received ICV to verify */
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap: IV first, then data */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		/* Copy the received ICV into scratch space past the SGEs so
		 * SEC can check it; the original digest buffer is zeroed.
		 */
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		/* NOTE(review): presumably conveys the auth-only prefix
		 * length to SEC via internal JD fields — confirm vs hw docs.
		 */
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
359
/* Build a compound frame descriptor for an authentication-only op
 * (hash/HMAC).  Output FLE points at the digest buffer; input FLE
 * covers the data to authenticate (generate: DIR_ENC), or a SG list of
 * data plus a scratch copy of the received digest (verify: DIR_DEC).
 *
 * Returns 0 on success, -1 if the fle_pool is exhausted.
 */
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	/* fle -> output FLE, fle+1 -> input FLE */
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		/* Backed by a HW buffer pool: tag FD and FLEs with it */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		/* No HW pool backing: mark the pool id invalid (IVP) */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	/* Auth-only ops use the INIT/FINAL shared descriptor variant */
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Output FLE: digest buffer written by SEC */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		/* Generate: input is just the auth region of the mbuf */
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		/* Verify: input SG = data followed by a scratch copy of the
		 * received digest; the original digest buffer is zeroed.
		 */
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		/* Received digest is preserved in scratch space past the SGEs */
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
450
/* Build a compound frame descriptor for a cipher-only operation.
 * Output FLE points directly at the mbuf data region; input FLE is a
 * SG list of IV followed by the cipher data region.
 *
 * Returns 0 on success, -1 if the fle_pool is exhausted.
 */
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	/* fle -> output FLE, fle+1 -> input FLE, SGEs follow at fle+2 */
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		/* Backed by a HW buffer pool: tag FD, FLEs and SGEs with it */
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		/* No HW pool backing: mark the pool id invalid (IVP) */
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	/* Shared descriptor (flow context) and FD as compound frame list */
	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Output FLE: ciphertext/plaintext written in place in the mbuf */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	/* Input FLE: SG list of IV followed by the data region */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
546
547 static inline int
548 build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
549              struct qbman_fd *fd, uint16_t bpid)
550 {
551         int ret = -1;
552
553         PMD_INIT_FUNC_TRACE();
554
555         switch (sess->ctxt_type) {
556         case DPAA2_SEC_CIPHER:
557                 ret = build_cipher_fd(sess, op, fd, bpid);
558                 break;
559         case DPAA2_SEC_AUTH:
560                 ret = build_auth_fd(sess, op, fd, bpid);
561                 break;
562         case DPAA2_SEC_AEAD:
563                 ret = build_authenc_gcm_fd(sess, op, fd, bpid);
564                 break;
565         case DPAA2_SEC_CIPHER_HASH:
566                 ret = build_authenc_fd(sess, op, fd, bpid);
567                 break;
568         case DPAA2_SEC_HASH_CIPHER:
569         default:
570                 RTE_LOG(ERR, PMD, "error: Unsupported session\n");
571         }
572         return ret;
573 }
574
575 static uint16_t
576 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
577                         uint16_t nb_ops)
578 {
579         /* Function to transmit the frames to given device and VQ*/
580         uint32_t loop;
581         int32_t ret;
582         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
583         uint32_t frames_to_send;
584         struct qbman_eq_desc eqdesc;
585         struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
586         struct qbman_swp *swp;
587         uint16_t num_tx = 0;
588         /*todo - need to support multiple buffer pools */
589         uint16_t bpid;
590         struct rte_mempool *mb_pool;
591         dpaa2_sec_session *sess;
592
593         if (unlikely(nb_ops == 0))
594                 return 0;
595
596         if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
597                 RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
598                 return 0;
599         }
600         /*Prepare enqueue descriptor*/
601         qbman_eq_desc_clear(&eqdesc);
602         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
603         qbman_eq_desc_set_response(&eqdesc, 0, 0);
604         qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
605
606         if (!DPAA2_PER_LCORE_SEC_DPIO) {
607                 ret = dpaa2_affine_qbman_swp_sec();
608                 if (ret) {
609                         RTE_LOG(ERR, PMD, "Failure in affining portal\n");
610                         return 0;
611                 }
612         }
613         swp = DPAA2_PER_LCORE_SEC_PORTAL;
614
615         while (nb_ops) {
616                 frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
617
618                 for (loop = 0; loop < frames_to_send; loop++) {
619                         /*Clear the unused FD fields before sending*/
620                         memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
621                         sess = (dpaa2_sec_session *)
622                                         get_session_private_data(
623                                         (*ops)->sym->session,
624                                         cryptodev_driver_id);
625                         mb_pool = (*ops)->sym->m_src->pool;
626                         bpid = mempool_to_bpid(mb_pool);
627                         ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
628                         if (ret) {
629                                 PMD_DRV_LOG(ERR, "error: Improper packet"
630                                             " contents for crypto operation\n");
631                                 goto skip_tx;
632                         }
633                         ops++;
634                 }
635                 loop = 0;
636                 while (loop < frames_to_send) {
637                         loop += qbman_swp_send_multiple(swp, &eqdesc,
638                                                         &fd_arr[loop],
639                                                         frames_to_send - loop);
640                 }
641
642                 num_tx += frames_to_send;
643                 nb_ops -= frames_to_send;
644         }
645 skip_tx:
646         dpaa2_qp->tx_vq.tx_pkts += num_tx;
647         dpaa2_qp->tx_vq.err_pkts += nb_ops;
648         return num_tx;
649 }
650
/* Convert a dequeued frame descriptor back into the originating crypto
 * op.  The FD address points at the output FLE; the scratch FLE one
 * entry before it holds the op pointer and the session private ctxt
 * stored on the enqueue path.  Frees the fle_pool buffer.
 *
 * Returns the op, or NULL for non-inline (IVP) buffers, which are not
 * yet handled.
 */
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* we are using the first FLE entry to store Mbuf.
	 * Currently we donot know which FLE has the mbuf stored.
	 * So while retreiving we can go back 1 FLE from the FD -ADDR
	 * to get the MBUF Addr from the previous FLE.
	 * We can have a better approach to use the inline Mbuf
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
		return NULL;
	}
	/* Scratch FLE (fle - 1) holds the op pointer saved at enqueue */
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefeth op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory: return the whole scratch buffer (which
	 * starts at the scratch FLE) to the session's fle_pool
	 */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}
697
698 static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	/* Lazily affine a SEC QBMAN software portal to this lcore;
	 * without a portal no dequeue is possible, so report 0 ops.
	 */
	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	/* Build a volatile-dequeue descriptor: pull at most one DQRR
	 * ring worth of frames from the rx FQ into dq_storage.
	 */
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
				"QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issues PULL command.
	 */
	while (!is_last) {
		/* Check if the previous issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(swp, dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		/* Translate the hardware frame descriptor back into the
		 * crypto op that was enqueued with it.
		 */
		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		/* Non-zero frame context indicates a SEC engine error. */
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/*Return the total number of packets received to DPAA2 app*/
	return num_rx;
}
794
795 /** Release queue pair */
796 static int
797 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
798 {
799         struct dpaa2_sec_qp *qp =
800                 (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
801
802         PMD_INIT_FUNC_TRACE();
803
804         if (qp->rx_vq.q_storage) {
805                 dpaa2_free_dq_storage(qp->rx_vq.q_storage);
806                 rte_free(qp->rx_vq.q_storage);
807         }
808         rte_free(qp);
809
810         dev->data->queue_pairs[queue_pair_id] = NULL;
811
812         return 0;
813 }
814
815 /** Setup a queue pair */
816 static int
817 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
818                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
819                 __rte_unused int socket_id,
820                 __rte_unused struct rte_mempool *session_pool)
821 {
822         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
823         struct dpaa2_sec_qp *qp;
824         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
825         struct dpseci_rx_queue_cfg cfg;
826         int32_t retcode;
827
828         PMD_INIT_FUNC_TRACE();
829
830         /* If qp is already in use free ring memory and qp metadata. */
831         if (dev->data->queue_pairs[qp_id] != NULL) {
832                 PMD_DRV_LOG(INFO, "QP already setup");
833                 return 0;
834         }
835
836         PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
837                     dev, qp_id, qp_conf);
838
839         memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
840
841         qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
842                         RTE_CACHE_LINE_SIZE);
843         if (!qp) {
844                 RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
845                 return -1;
846         }
847
848         qp->rx_vq.dev = dev;
849         qp->tx_vq.dev = dev;
850         qp->rx_vq.q_storage = rte_malloc("sec dq storage",
851                 sizeof(struct queue_storage_info_t),
852                 RTE_CACHE_LINE_SIZE);
853         if (!qp->rx_vq.q_storage) {
854                 RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
855                 return -1;
856         }
857         memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
858
859         if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
860                 RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
861                 return -1;
862         }
863
864         dev->data->queue_pairs[qp_id] = qp;
865
866         cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
867         cfg.user_ctx = (uint64_t)(&qp->rx_vq);
868         retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
869                                       qp_id, &cfg);
870         return retcode;
871 }
872
873 /** Start queue pair */
874 static int
875 dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
876                            __rte_unused uint16_t queue_pair_id)
877 {
878         PMD_INIT_FUNC_TRACE();
879
880         return 0;
881 }
882
883 /** Stop queue pair */
884 static int
885 dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
886                           __rte_unused uint16_t queue_pair_id)
887 {
888         PMD_INIT_FUNC_TRACE();
889
890         return 0;
891 }
892
893 /** Return the number of allocated queue pairs */
894 static uint32_t
895 dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
896 {
897         PMD_INIT_FUNC_TRACE();
898
899         return dev->data->nb_queue_pairs;
900 }
901
/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
910
911 static void
912 dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
913                              void *sess __rte_unused)
914 {
915         PMD_INIT_FUNC_TRACE();
916 }
917
/* Build the SEC shared descriptor and flow context for a cipher-only
 * session (AES-CBC, 3DES-CBC, AES-CTR) and store the context in
 * @session. Returns 0 on success, -1 on failure (allocations freed).
 */
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	/* Session-local FLE pool reference for the datapath. */
	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	/* Keep a private copy of the cipher key for the descriptor. */
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	/* Map the cryptodev algorithm onto SEC algorithm/mode selectors. */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Construct the block-cipher shared descriptor; a negative
	 * return indicates a descriptor build failure.
	 */
	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	/* Program descriptor length and the rx_vq address (split into
	 * the low/high 32-bit flow-context words).
	 * NOTE(review): assumes queue pair 0 is already set up here —
	 * verify session creation ordering against qp setup.
	 */
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
1032
1033 static int
1034 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1035                     struct rte_crypto_sym_xform *xform,
1036                     dpaa2_sec_session *session)
1037 {
1038         struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1039         struct alginfo authdata;
1040         unsigned int bufsize, i;
1041         struct ctxt_priv *priv;
1042         struct sec_flow_context *flc;
1043
1044         PMD_INIT_FUNC_TRACE();
1045
1046         /* For SEC AUTH three descriptors are required for various stages */
1047         priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1048                         sizeof(struct ctxt_priv) + 3 *
1049                         sizeof(struct sec_flc_desc),
1050                         RTE_CACHE_LINE_SIZE);
1051         if (priv == NULL) {
1052                 RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
1053                 return -1;
1054         }
1055
1056         priv->fle_pool = dev_priv->fle_pool;
1057         flc = &priv->flc_desc[DESC_INITFINAL].flc;
1058
1059         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1060                         RTE_CACHE_LINE_SIZE);
1061         if (session->auth_key.data == NULL) {
1062                 RTE_LOG(ERR, PMD, "No Memory for auth key");
1063                 rte_free(priv);
1064                 return -1;
1065         }
1066         session->auth_key.length = xform->auth.key.length;
1067
1068         memcpy(session->auth_key.data, xform->auth.key.data,
1069                xform->auth.key.length);
1070         authdata.key = (uint64_t)session->auth_key.data;
1071         authdata.keylen = session->auth_key.length;
1072         authdata.key_enc_flags = 0;
1073         authdata.key_type = RTA_DATA_IMM;
1074
1075         session->digest_length = xform->auth.digest_length;
1076
1077         switch (xform->auth.algo) {
1078         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1079                 authdata.algtype = OP_ALG_ALGSEL_SHA1;
1080                 authdata.algmode = OP_ALG_AAI_HMAC;
1081                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1082                 break;
1083         case RTE_CRYPTO_AUTH_MD5_HMAC:
1084                 authdata.algtype = OP_ALG_ALGSEL_MD5;
1085                 authdata.algmode = OP_ALG_AAI_HMAC;
1086                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1087                 break;
1088         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1089                 authdata.algtype = OP_ALG_ALGSEL_SHA256;
1090                 authdata.algmode = OP_ALG_AAI_HMAC;
1091                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1092                 break;
1093         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1094                 authdata.algtype = OP_ALG_ALGSEL_SHA384;
1095                 authdata.algmode = OP_ALG_AAI_HMAC;
1096                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1097                 break;
1098         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1099                 authdata.algtype = OP_ALG_ALGSEL_SHA512;
1100                 authdata.algmode = OP_ALG_AAI_HMAC;
1101                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1102                 break;
1103         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1104                 authdata.algtype = OP_ALG_ALGSEL_SHA224;
1105                 authdata.algmode = OP_ALG_AAI_HMAC;
1106                 session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1107                 break;
1108         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1109         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1110         case RTE_CRYPTO_AUTH_NULL:
1111         case RTE_CRYPTO_AUTH_SHA1:
1112         case RTE_CRYPTO_AUTH_SHA256:
1113         case RTE_CRYPTO_AUTH_SHA512:
1114         case RTE_CRYPTO_AUTH_SHA224:
1115         case RTE_CRYPTO_AUTH_SHA384:
1116         case RTE_CRYPTO_AUTH_MD5:
1117         case RTE_CRYPTO_AUTH_AES_GMAC:
1118         case RTE_CRYPTO_AUTH_KASUMI_F9:
1119         case RTE_CRYPTO_AUTH_AES_CMAC:
1120         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1121         case RTE_CRYPTO_AUTH_ZUC_EIA3:
1122                 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
1123                         xform->auth.algo);
1124                 goto error_out;
1125         default:
1126                 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
1127                         xform->auth.algo);
1128                 goto error_out;
1129         }
1130         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1131                                 DIR_ENC : DIR_DEC;
1132
1133         bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1134                                    1, 0, &authdata, !session->dir,
1135                                    session->digest_length);
1136
1137         flc->word1_sdl = (uint8_t)bufsize;
1138         flc->word2_rflc_31_0 = lower_32_bits(
1139                         (uint64_t)&(((struct dpaa2_sec_qp *)
1140                         dev->data->queue_pairs[0])->rx_vq));
1141         flc->word3_rflc_63_32 = upper_32_bits(
1142                         (uint64_t)&(((struct dpaa2_sec_qp *)
1143                         dev->data->queue_pairs[0])->rx_vq));
1144         session->ctxt = priv;
1145         for (i = 0; i < bufsize; i++)
1146                 PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
1147                             i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1148
1149
1150         return 0;
1151
1152 error_out:
1153         rte_free(session->auth_key.data);
1154         rte_free(priv);
1155         return -1;
1156 }
1157
/* Build the SEC shared descriptor and flow context for an AEAD
 * (AES-GCM) session and store the context in @session.
 * Returns 0 on success, -1 on failure (allocations freed).
 */
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	/* Keep a private copy of the AEAD key for the descriptor. */
	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->add_auth_data_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	/* Only AES-GCM is supported; CCM is recognized but rejected. */
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Ask RTA whether the key can be inlined in the descriptor;
	 * desc[0] temporarily carries the key length as query input
	 * and desc[1] receives the inline/pointer decision bit.
	 */
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		/* Key too large to inline: reference it by IOVA instead. */
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	/* Clear the scratch words before building the real descriptor. */
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	/* Program descriptor length and the rx_vq address (low/high
	 * 32-bit flow-context words).
	 */
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
1276
1277
/* Build the SEC shared descriptor and flow context for a chained
 * cipher+auth (authenc) session and store the context in @session.
 * The cipher/auth xform order is taken from
 * session->ext_params.aead_ctxt.auth_cipher_text, which the caller
 * sets before invoking this function.
 * Returns 0 on success, -1 on failure (allocations freed).
 */
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Pick the cipher/auth xforms out of the chain and derive the
	 * session context type from the chain order and cipher op.
	 */
	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	/* Keep private copies of both keys for the descriptor. */
	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	/* Map the auth algorithm onto SEC selectors. */
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Map the cipher algorithm onto SEC selectors. */
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	/* Ask RTA whether each key can be inlined in the descriptor;
	 * desc[0]/desc[1] temporarily carry the key lengths as query
	 * input and desc[2] receives one decision bit per key.
	 */
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		/* Key too large to inline: reference it by IOVA instead. */
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	/* Clear the scratch words before building the real descriptor. */
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
		goto error_out;
	}

	/* Program descriptor length and the rx_vq address (low/high
	 * 32-bit flow-context words).
	 */
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
1503
1504 static int
1505 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
1506                             struct rte_crypto_sym_xform *xform, void *sess)
1507 {
1508         dpaa2_sec_session *session = sess;
1509
1510         PMD_INIT_FUNC_TRACE();
1511
1512         if (unlikely(sess == NULL)) {
1513                 RTE_LOG(ERR, PMD, "invalid session struct");
1514                 return -1;
1515         }
1516
1517         /* Default IV length = 0 */
1518         session->iv.length = 0;
1519
1520         /* Cipher Only */
1521         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1522                 session->ctxt_type = DPAA2_SEC_CIPHER;
1523                 dpaa2_sec_cipher_init(dev, xform, session);
1524
1525         /* Authentication Only */
1526         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1527                    xform->next == NULL) {
1528                 session->ctxt_type = DPAA2_SEC_AUTH;
1529                 dpaa2_sec_auth_init(dev, xform, session);
1530
1531         /* Cipher then Authenticate */
1532         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1533                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1534                 session->ext_params.aead_ctxt.auth_cipher_text = true;
1535                 dpaa2_sec_aead_chain_init(dev, xform, session);
1536
1537         /* Authenticate then Cipher */
1538         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1539                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1540                 session->ext_params.aead_ctxt.auth_cipher_text = false;
1541                 dpaa2_sec_aead_chain_init(dev, xform, session);
1542
1543         /* AEAD operation for AES-GCM kind of Algorithms */
1544         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1545                    xform->next == NULL) {
1546                 dpaa2_sec_aead_init(dev, xform, session);
1547
1548         } else {
1549                 RTE_LOG(ERR, PMD, "Invalid crypto type");
1550                 return -1;
1551         }
1552
1553         return 0;
1554 }
1555
1556 static int
1557 dpaa2_sec_session_configure(struct rte_cryptodev *dev,
1558                 struct rte_crypto_sym_xform *xform,
1559                 struct rte_cryptodev_sym_session *sess,
1560                 struct rte_mempool *mempool)
1561 {
1562         void *sess_private_data;
1563
1564         if (rte_mempool_get(mempool, &sess_private_data)) {
1565                 CDEV_LOG_ERR(
1566                         "Couldn't get object from session mempool");
1567                 return -1;
1568         }
1569
1570         if (dpaa2_sec_set_session_parameters(dev, xform, sess_private_data) != 0) {
1571                 PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
1572                                 "session parameters");
1573
1574                 /* Return session to mempool */
1575                 rte_mempool_put(mempool, sess_private_data);
1576                 return -1;
1577         }
1578
1579         set_session_private_data(sess, dev->driver_id,
1580                 sess_private_data);
1581
1582         return 0;
1583 }
1584
1585 /** Clear the memory of session so it doesn't leave key material behind */
1586 static void
1587 dpaa2_sec_session_clear(struct rte_cryptodev *dev,
1588                 struct rte_cryptodev_sym_session *sess)
1589 {
1590         PMD_INIT_FUNC_TRACE();
1591         uint8_t index = dev->driver_id;
1592         void *sess_priv = get_session_private_data(sess, index);
1593         dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
1594
1595         if (sess_priv) {
1596                 rte_free(s->ctxt);
1597                 rte_free(s->cipher_key.data);
1598                 rte_free(s->auth_key.data);
1599                 memset(sess, 0, sizeof(dpaa2_sec_session));
1600                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1601                 set_session_private_data(sess, index, NULL);
1602                 rte_mempool_put(sess_mp, sess_priv);
1603         }
1604 }
1605
/* Device configure hook: the DPSECI object needs no per-configure setup,
 * so this only satisfies the cryptodev ops contract and reports success.
 */
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
1614
1615 static int
1616 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
1617 {
1618         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1619         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1620         struct dpseci_attr attr;
1621         struct dpaa2_queue *dpaa2_q;
1622         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1623                                         dev->data->queue_pairs;
1624         struct dpseci_rx_queue_attr rx_attr;
1625         struct dpseci_tx_queue_attr tx_attr;
1626         int ret, i;
1627
1628         PMD_INIT_FUNC_TRACE();
1629
1630         memset(&attr, 0, sizeof(struct dpseci_attr));
1631
1632         ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
1633         if (ret) {
1634                 PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
1635                              priv->hw_id);
1636                 goto get_attr_failure;
1637         }
1638         ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
1639         if (ret) {
1640                 PMD_INIT_LOG(ERR,
1641                              "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
1642                 goto get_attr_failure;
1643         }
1644         for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
1645                 dpaa2_q = &qp[i]->rx_vq;
1646                 dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
1647                                     &rx_attr);
1648                 dpaa2_q->fqid = rx_attr.fqid;
1649                 PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
1650         }
1651         for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
1652                 dpaa2_q = &qp[i]->tx_vq;
1653                 dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
1654                                     &tx_attr);
1655                 dpaa2_q->fqid = tx_attr.fqid;
1656                 PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
1657         }
1658
1659         return 0;
1660 get_attr_failure:
1661         dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
1662         return -1;
1663 }
1664
1665 static void
1666 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
1667 {
1668         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1669         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1670         int ret;
1671
1672         PMD_INIT_FUNC_TRACE();
1673
1674         ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
1675         if (ret) {
1676                 PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
1677                              priv->hw_id);
1678                 return;
1679         }
1680
1681         ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
1682         if (ret < 0) {
1683                 PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
1684                              ret);
1685                 return;
1686         }
1687 }
1688
1689 static int
1690 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
1691 {
1692         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1693         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1694         int ret;
1695
1696         PMD_INIT_FUNC_TRACE();
1697
1698         /* Function is reverse of dpaa2_sec_dev_init.
1699          * It does the following:
1700          * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
1701          * 2. Close the DPSECI device
1702          * 3. Free the allocated resources.
1703          */
1704
1705         /*Close the device at underlying layer*/
1706         ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
1707         if (ret) {
1708                 PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
1709                              " error code %d\n", ret);
1710                 return -1;
1711         }
1712
1713         /*Free the allocated memory for ethernet private data and dpseci*/
1714         priv->hw = NULL;
1715         free(dpseci);
1716
1717         return 0;
1718 }
1719
1720 static void
1721 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
1722                         struct rte_cryptodev_info *info)
1723 {
1724         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
1725
1726         PMD_INIT_FUNC_TRACE();
1727         if (info != NULL) {
1728                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
1729                 info->feature_flags = dev->feature_flags;
1730                 info->capabilities = dpaa2_sec_capabilities;
1731                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1732                 info->driver_id = cryptodev_driver_id;
1733         }
1734 }
1735
1736 static
1737 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
1738                          struct rte_cryptodev_stats *stats)
1739 {
1740         struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
1741         struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
1742         struct dpseci_sec_counters counters = {0};
1743         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1744                                         dev->data->queue_pairs;
1745         int ret, i;
1746
1747         PMD_INIT_FUNC_TRACE();
1748         if (stats == NULL) {
1749                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1750                 return;
1751         }
1752         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1753                 if (qp[i] == NULL) {
1754                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1755                         continue;
1756                 }
1757
1758                 stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
1759                 stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
1760                 stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
1761                 stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
1762         }
1763
1764         ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
1765                                       &counters);
1766         if (ret) {
1767                 PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
1768         } else {
1769                 PMD_DRV_LOG(INFO, "dpseci hw stats:"
1770                             "\n\tNumber of Requests Dequeued = %lu"
1771                             "\n\tNumber of Outbound Encrypt Requests = %lu"
1772                             "\n\tNumber of Inbound Decrypt Requests = %lu"
1773                             "\n\tNumber of Outbound Bytes Encrypted = %lu"
1774                             "\n\tNumber of Outbound Bytes Protected = %lu"
1775                             "\n\tNumber of Inbound Bytes Decrypted = %lu"
1776                             "\n\tNumber of Inbound Bytes Validated = %lu",
1777                             counters.dequeued_requests,
1778                             counters.ob_enc_requests,
1779                             counters.ib_dec_requests,
1780                             counters.ob_enc_bytes,
1781                             counters.ob_prot_bytes,
1782                             counters.ib_dec_bytes,
1783                             counters.ib_valid_bytes);
1784         }
1785 }
1786
1787 static
1788 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
1789 {
1790         int i;
1791         struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
1792                                    (dev->data->queue_pairs);
1793
1794         PMD_INIT_FUNC_TRACE();
1795
1796         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1797                 if (qp[i] == NULL) {
1798                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1799                         continue;
1800                 }
1801                 qp[i]->tx_vq.rx_pkts = 0;
1802                 qp[i]->tx_vq.tx_pkts = 0;
1803                 qp[i]->tx_vq.err_pkts = 0;
1804                 qp[i]->rx_vq.rx_pkts = 0;
1805                 qp[i]->rx_vq.tx_pkts = 0;
1806                 qp[i]->rx_vq.err_pkts = 0;
1807         }
1808 }
1809
/* Cryptodev PMD operation table: wires the generic rte_cryptodev API
 * to the DPAA2 SEC implementations defined in this file.
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_initialize   = dpaa2_sec_session_initialize,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear        = dpaa2_sec_session_clear,
};
1828
1829 static int
1830 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
1831 {
1832         struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
1833
1834         rte_mempool_free(internals->fle_pool);
1835
1836         PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
1837                      dev->data->name, rte_socket_id());
1838
1839         return 0;
1840 }
1841
1842 static int
1843 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
1844 {
1845         struct dpaa2_sec_dev_private *internals;
1846         struct rte_device *dev = cryptodev->device;
1847         struct rte_dpaa2_device *dpaa2_dev;
1848         struct fsl_mc_io *dpseci;
1849         uint16_t token;
1850         struct dpseci_attr attr;
1851         int retcode, hw_id;
1852         char str[20];
1853
1854         PMD_INIT_FUNC_TRACE();
1855         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
1856         if (dpaa2_dev == NULL) {
1857                 PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
1858                 return -1;
1859         }
1860         hw_id = dpaa2_dev->object_id;
1861
1862         cryptodev->driver_id = cryptodev_driver_id;
1863         cryptodev->dev_ops = &crypto_ops;
1864
1865         cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
1866         cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
1867         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1868                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
1869                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
1870
1871         internals = cryptodev->data->dev_private;
1872         internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
1873
1874         /*
1875          * For secondary processes, we don't initialise any further as primary
1876          * has already done this work. Only check we don't need a different
1877          * RX function
1878          */
1879         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1880                 PMD_INIT_LOG(DEBUG, "Device already init by primary process");
1881                 return 0;
1882         }
1883         /*Open the rte device via MC and save the handle for further use*/
1884         dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
1885                                 sizeof(struct fsl_mc_io), 0);
1886         if (!dpseci) {
1887                 PMD_INIT_LOG(ERR,
1888                              "Error in allocating the memory for dpsec object");
1889                 return -1;
1890         }
1891         dpseci->regs = rte_mcp_ptr_list[0];
1892
1893         retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
1894         if (retcode != 0) {
1895                 PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
1896                              retcode);
1897                 goto init_error;
1898         }
1899         retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
1900         if (retcode != 0) {
1901                 PMD_INIT_LOG(ERR,
1902                              "Cannot get dpsec device attributed: Error = %x",
1903                              retcode);
1904                 goto init_error;
1905         }
1906         sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
1907
1908         internals->max_nb_queue_pairs = attr.num_tx_queues;
1909         cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
1910         internals->hw = dpseci;
1911         internals->token = token;
1912
1913         sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
1914         internals->fle_pool = rte_mempool_create((const char *)str,
1915                         FLE_POOL_NUM_BUFS,
1916                         FLE_POOL_BUF_SIZE,
1917                         FLE_POOL_CACHE_SIZE, 0,
1918                         NULL, NULL, NULL, NULL,
1919                         SOCKET_ID_ANY, 0);
1920         if (!internals->fle_pool) {
1921                 RTE_LOG(ERR, PMD, "%s create failed", str);
1922                 goto init_error;
1923         } else
1924                 RTE_LOG(INFO, PMD, "%s created: %p\n", str,
1925                                 internals->fle_pool);
1926
1927         PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
1928         return 0;
1929
1930 init_error:
1931         PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
1932
1933         /* dpaa2_sec_uninit(crypto_dev_name); */
1934         return -EFAULT;
1935 }
1936
1937 static int
1938 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
1939                           struct rte_dpaa2_device *dpaa2_dev)
1940 {
1941         struct rte_cryptodev *cryptodev;
1942         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1943
1944         int retval;
1945
1946         sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
1947
1948         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
1949         if (cryptodev == NULL)
1950                 return -ENOMEM;
1951
1952         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1953                 cryptodev->data->dev_private = rte_zmalloc_socket(
1954                                         "cryptodev private structure",
1955                                         sizeof(struct dpaa2_sec_dev_private),
1956                                         RTE_CACHE_LINE_SIZE,
1957                                         rte_socket_id());
1958
1959                 if (cryptodev->data->dev_private == NULL)
1960                         rte_panic("Cannot allocate memzone for private "
1961                                         "device data");
1962         }
1963
1964         dpaa2_dev->cryptodev = cryptodev;
1965         cryptodev->device = &dpaa2_dev->device;
1966         cryptodev->device->driver = &dpaa2_drv->driver;
1967
1968         /* init user callbacks */
1969         TAILQ_INIT(&(cryptodev->link_intr_cbs));
1970
1971         /* Invoke PMD device initialization function */
1972         retval = dpaa2_sec_dev_init(cryptodev);
1973         if (retval == 0)
1974                 return 0;
1975
1976         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1977                 rte_free(cryptodev->data->dev_private);
1978
1979         cryptodev->attached = RTE_CRYPTODEV_DETACHED;
1980
1981         return -ENXIO;
1982 }
1983
1984 static int
1985 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
1986 {
1987         struct rte_cryptodev *cryptodev;
1988         int ret;
1989
1990         cryptodev = dpaa2_dev->cryptodev;
1991         if (cryptodev == NULL)
1992                 return -ENODEV;
1993
1994         ret = dpaa2_sec_uninit(cryptodev);
1995         if (ret)
1996                 return ret;
1997
1998         /* free crypto device */
1999         rte_cryptodev_pmd_release_device(cryptodev);
2000
2001         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2002                 rte_free(cryptodev->data->dev_private);
2003
2004         cryptodev->device = NULL;
2005         cryptodev->data = NULL;
2006
2007         return 0;
2008 }
2009
/* Bus-level driver definition: matches DPSECI objects enumerated on the
 * fsl-mc bus and binds them to the probe/remove handlers above.
 */
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_MC_DPSECI_DEVID,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

/* Register the driver with the DPAA2 bus and the cryptodev framework. */
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);