/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
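/*
 * Build a compound frame descriptor (FD) for an AEAD (AES-GCM style)
 * operation. The first frame-list entry (FLE) describes the output
 * buffer; the second describes the input scatter/gather list holding
 * IV, AAD and payload (plus, on decryption, the ICV to be verified).
 */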
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sym_op->aead.digest.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge,
			   DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
	sge->length = auth_only_len;
	DPAA2_SET_FLE_BPID(sge, bpid);

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		memset(sym_op->aead.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
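/*
 * Build a compound FD for a chained cipher+authentication operation.
 * The layout mirrors build_authenc_gcm_fd(): an output FLE for the
 * ciphered data (plus ICV on encryption) and an input FLE carrying the
 * IV, the authenticate-only region and the payload.
 */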
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				 sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
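/*
 * Build an FD for an authentication-only operation. For digest
 * generation the input is referenced directly from the mbuf; for
 * verification the existing digest is copied aside (old_digest) and
 * appended to the input list so the SEC engine can check it.
 */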
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;
	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
			      sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
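/*
 * Build an FD for a cipher-only operation: the output FLE points at
 * the destination region of the source mbuf, and the input FLE is a
 * two-entry scatter/gather list of the IV followed by the payload.
 */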
static inline int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}
	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d, ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
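/*
 * Dispatch FD construction to the helper matching the session context
 * type that was selected at session-configure time.
 */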
static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AEAD:
		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}
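/*
 * Enqueue crypto operations on the DPSECI TX queue. Frames are built
 * in bursts of up to MAX_TX_RING_SLOTS and pushed through the
 * per-lcore QBMAN software portal; on a malformed op the burst is cut
 * short and only the frames built so far are sent.
 */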
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;
	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
				(*ops)->sym->session->_private;
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_send_multiple(swp, &eqdesc,
							&fd_arr[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
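/*
 * Convert a dequeued FD back to the originating crypto op. The op
 * pointer was stashed in the FLE just before the one the FD points at
 * (see the TODO notes in the build_*_fd helpers).
 */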
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	/* we are using the first FLE entry to store Mbuf.
	 * Currently we do not know which FLE has the mbuf stored.
	 * So while retrieving we can go back one FLE from the FD->ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));
	/* Prefetching mbuf */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}
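/*
 * Dequeue completed operations from the DPSECI RX queue via a volatile
 * QBMAN pull command, translating each FD back to its crypto op and
 * mapping the SEC frame status (FRC) to the op status.
 */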
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible to receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);
	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
				" QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};
	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet Driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(swp, dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */
	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			   __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
			   __rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();
	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
					 sizeof(struct queue_storage_info_t),
					 RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}
/** Returns the size of the dpaa2 crypto session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static void
dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
			     void *sess __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
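/*
 * Populate a session and its shared descriptor for a cipher-only
 * transform. The RX-queue context of queue pair 0 is embedded in the
 * flow context (FLC) words so completions return to this device.
 */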
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
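/*
 * Populate a session and shared descriptor for an authentication-only
 * (HMAC) transform.
 */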
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;
	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
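/*
 * Populate a session and shared descriptor for a true AEAD transform
 * (currently AES-GCM only). rta_inline_query() decides whether the key
 * can be inlined in the descriptor or must be referenced by pointer.
 */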
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();
	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->add_auth_data_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);
	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
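/*
 * Populate a session and shared descriptor for a chained
 * cipher+authentication transform. Only the DPAA2_SEC_CIPHER_HASH
 * ordering is supported; hash-before-cipher is rejected when the
 * descriptor is built below.
 */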
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();
	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;
	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;
	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);
	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
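/*
 * Top-level session configure: classify the xform chain (cipher only,
 * auth only, cipher+auth in either order, or AEAD) and call the
 * matching init helper.
 */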
static void *
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct");
		return NULL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type");
		return NULL;
	}

	return session;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (s) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
			     ret);
		return;
	}
}
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/* Close the device at underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the allocated memory for ethernet private data and dpseci */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	}
}
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa2_sec_dev_configure,
	.dev_start            = dpaa2_sec_dev_start,
	.dev_stop             = dpaa2_sec_dev_stop,
	.dev_close            = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get            = dpaa2_sec_stats_get,
	.stats_reset          = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_initialize   = dpaa2_sec_session_initialize,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear        = dpaa2_sec_session_clear,
};
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed", str);
		goto init_error;
	}
	RTE_LOG(INFO, PMD, "%s created: %p\n", str,
		internals->fle_pool);

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -1;
}
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_MC_DPSECI_DEVID,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);