/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"
/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER
 * and a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
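/*
 * Each FLE pool element backs the frame-list table of one crypto op.
 * As used by the build_*_fd() helpers below, the layout is roughly:
 *   fle[0]   - scratch entry saving the rte_crypto_op and ctxt_priv
 *              pointers for recovery on the response path
 *   fle[1]   - output frame list entry (SEC writes results here)
 *   fle[2]   - input frame list entry (SEC reads source data from here)
 *   fle[3..] - scatter/gather entries and scratch space (e.g. saved ICV)
 * FLE_POOL_BUF_SIZE must be large enough for the worst-case table built
 * by any of the helpers.
 */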
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;
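/*
 * The build_*_fd() helpers below convert an rte_crypto_op into a QBMAN
 * compound frame descriptor: the FD points at a pair of frame list
 * entries (output first, then input), each of which may in turn point
 * at a scatter/gather table. SEC consumes the input frame list, runs
 * the shared descriptor referenced through the FD's FLC field, and
 * writes its results to the output frame list.
 */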
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we always go back one FLE from the
	 * FD address to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sym_op->aead.digest.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	DPAA2_SET_FLE_ADDR(sge,
			   DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
	sge->length = auth_only_len;
	DPAA2_SET_FLE_BPID(sge, bpid);
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
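	/*
	 * Decap: stash the ICV received with the packet in the FLE
	 * scratch area and zero the original digest buffer, so SEC
	 * compares against the stashed copy supplied through the input
	 * frame list.
	 */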
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		memset(sym_op->aead.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
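/*
 * For cipher+auth chains, the bytes that are authenticated but not
 * ciphered (auth_only_len) are derived from the difference between the
 * auth and cipher data ranges supplied with the op.
 */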
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				 sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	/* We are using the first FLE entry to store the mbuf. Currently
	 * we do not know which FLE has the mbuf stored, so while
	 * retrieving we always go back one FLE from the FD address to get
	 * the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we always go back one FLE from the FD address to get
	 * the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		memset(sym_op->auth.digest.data, 0, sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
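/*
 * For plain cipher ops the input frame list carries the IV followed by
 * the payload, so the FD length accounts for both the cipher data and
 * the IV.
 */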
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so while
	 * retrieving we always go back one FLE from the FD address to get
	 * the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AEAD:
		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Transmit the frames to the given device and VQ. */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare the enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
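	/*
	 * Enqueue in batches: each iteration sends at most
	 * MAX_TX_RING_SLOTS frames (the QBMAN enqueue batch size); a
	 * smaller remainder goes out as one short batch.
	 */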
	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
				get_session_private_data(
					(*ops)->sym->session,
					cryptodev_driver_id);
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_send_multiple(swp, &eqdesc,
							&fd_arr[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf. Currently we
	 * do not know which FLE has the mbuf stored, so while retrieving we
	 * always go back one FLE from the FD address to get the mbuf
	 * address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it. */
		RTE_LOG(ERR, PMD,
			"error: non-inline buffer handling not implemented\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}
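/*
 * Response path: each dequeued FD points at the FLE table that was built
 * on enqueue, so the op pointer is recovered from the scratch entry at
 * (fle - 1) above. A non-zero frame result (FRC) in the FD flags a SEC
 * error for that frame.
 */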
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Receive frames for a given device and VQ. */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
				" QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};

	/* Receive packets until the 'last dequeue' entry for the PULL
	 * command issued above is found.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * The SWP appears to be shared between the Ethernet driver
		 * and the SEC driver.
		 */
		while (!qbman_check_command_complete(swp, dq_storage))
			;

		/* Loop until the dq_storage is updated with a
		 * new token by QBMAN.
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;
		/* Check whether the last pull command has expired and,
		 * if so, set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for a valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO: parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already in use, free the ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;
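	/*
	 * Program the hardware rx queue with this qp's rx_vq as the user
	 * context, so frames dequeued from the FQ can be traced back to
	 * the queue pair that owns them.
	 */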
	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}
/** Return the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static void
dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
			     void *sess __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
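	/* Stash the address of this device's rx_vq in the flow context's
	 * RFLC words; this context accompanies every frame processed under
	 * the session. Note: queue pair 0 is assumed as the response queue
	 * here.
	 */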
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No memory for aead key");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->add_auth_data_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = aeaddata.keylen;
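	/*
	 * Ask RTA whether, given the base descriptor length and the key
	 * size, the key can be embedded in the shared descriptor
	 * (RTA_DATA_IMM) or must be referenced by pointer (RTA_DATA_PTR).
	 */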
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;

	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}

	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct");
		return -1;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type");
		return -EINVAL;
	}

	return 0;
}
static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,
			    struct rte_cryptodev_sym_session *sess,
			    struct rte_mempool *mempool)
{
	void *sess_private_data;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	if (dpaa2_sec_set_session_parameters(dev, xform, sess_private_data) != 0) {
		PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return -1;
	}

	set_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
			struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
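	/* Read the queue FQIDs back from the MC now that the DPSECI
	 * object is enabled, and cache them in each queue pair's vq.
	 */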
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* This function is the reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach the DPSECI from attached resources, i.e. buffer pools,
	 *    dpbp_id.
	 * 2. Close the DPSECI device.
	 * 3. Free the allocated resources.
	 */

	/* Close the device at the underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the memory allocated for the dpseci handle */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get	      = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_initialize   = dpaa2_sec_session_initialize,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear        = dpaa2_sec_session_clear,
};
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed", str);
		goto init_error;
	}
	RTE_LOG(INFO, PMD, "%s created: %p\n", str,
		internals->fle_pool);

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -1;
}
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_MC_DPSECI_DEVID,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);