/*-
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;
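
/*
 * Build a QBMAN compound frame descriptor (FD) for an AEAD (AES-GCM)
 * operation. The output FLE describes the transformed data (plus the ICV
 * on encrypt); the input FLE chains IV, AAD and the data to be processed.
 */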
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE holds the mbuf,
	 * so on retrieval we go back one FLE from the FD address to read
	 * the mbuf address from the previous FLE. A better approach would
	 * be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sym_op->aead.digest.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;
	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge,
			   DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
	sge->length = auth_only_len;
	DPAA2_SET_FLE_BPID(sge, bpid);
	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				      sess->digest_length +
				      sess->iv.length +
				      auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
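
/*
 * Build a compound FD for a chained cipher + auth operation;
 * auth_only_len covers the authenticated-only bytes that precede the
 * cipher region.
 */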
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE holds the mbuf, so on
	 * retrieval we go back one FLE from the FD address to read the
	 * mbuf address from the previous FLE. A better approach would be
	 * to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}
	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sess->digest_length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;
	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
			     sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				      sess->digest_length +
				      sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}
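
/*
 * Build a compound FD for an auth-only operation. On verify (DIR_DEC)
 * the received digest is copied aside so SEC can check against it.
 */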
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE holds the mbuf, so on
	 * retrieval we go back one FLE from the FD address to read the
	 * mbuf address from the previous FLE. A better approach would be
	 * to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;
	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}
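
/*
 * Build a compound FD for a cipher-only operation; the input FLE chains
 * IV + data, the output FLE receives the transformed data in place.
 */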
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE holds the mbuf, so on
	 * retrieval we go back one FLE from the FD address to read the
	 * mbuf address from the previous FLE. A better approach would be
	 * to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}
	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d, ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sess->iv.length,
		   sym_op->m_src->data_off);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
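
/* Dispatch FD construction based on the session context type. */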
static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AEAD:
		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}
static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
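
	/* Frames are enqueued in bursts of at most MAX_TX_RING_SLOTS; each
	 * iteration builds one FD per crypto op and pushes the batch through
	 * qbman_swp_enqueue_multiple() until all ops are consumed.
	 */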
	while (nb_ops) {
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
					get_session_private_data(
					(*ops)->sym->session,
					cryptodev_driver_id);
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							   &fd_arr[loop],
							   frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
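
/*
 * Convert a returned FD back into the originating crypto op: the op
 * pointer was stashed in the first FLE of the compound frame, one entry
 * before the address carried in the FD.
 */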
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE holds the mbuf, so on
	 * retrieval we go back one FLE from the FD address to read the
	 * mbuf address from the previous FLE. A better approach would be
	 * to use the inline mbuf.
	 */
	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));
	/* Prefetch the mbuf */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));

	return op;
}
static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible for receiving frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
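
	/* The per-queue dq_storage is DMA-able memory that QBMAN fills with
	 * dequeue results; entry 0 is reused for every pull on this queue
	 * pair.
	 */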
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);
	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD,
				"SEC VDQ command is not issued : QBMAN busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	};
	/* Receive the packets till Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also seems like the SWP is shared between the Ethernet
		 * Driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;

		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */
	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}
/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
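
	/* Build the block-cipher shared descriptor through the RTA library;
	 * it lives in the flow context (FLC) referenced by every FD.
	 */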
	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();
	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;
	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;
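
	/* Build the HMAC shared descriptor with the truncated digest
	 * length requested by the session.
	 */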
	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo aeaddata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	int err;

	PMD_INIT_FUNC_TRACE();
	/* Set IV parameters */
	session->iv.offset = aead_xform->iv.offset;
	session->iv.length = aead_xform->iv.length;
	session->ctxt_type = DPAA2_SEC_AEAD;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for aead key\n");
		rte_free(priv);
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;
	ctxt->auth_only_len = aead_xform->aad_length;

	aeaddata.key = (uint64_t)session->aead_key.data;
	aeaddata.keylen = session->aead_key.length;
	aeaddata.key_enc_flags = 0;
	aeaddata.key_type = RTA_DATA_IMM;
	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata.algtype = OP_ALG_ALGSEL_AES;
		aeaddata.algmode = OP_ALG_AAI_GCM;
		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
			aead_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
			aead_xform->algo);
		goto error_out;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
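
	/* Ask RTA whether the key can be inlined in the shared descriptor
	 * or must be referenced by pointer; the first descriptor words are
	 * used as scratch for the query and zeroed afterwards.
	 */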
	priv->flc_desc[0].desc[0] = aeaddata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[1], 1);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[1] & 1) {
		aeaddata.key_type = RTA_DATA_IMM;
	} else {
		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
		aeaddata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	if (session->dir == DIR_ENC)
		bufsize = cnstr_shdsc_gcm_encap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	else
		bufsize = cnstr_shdsc_gcm_decap(
				priv->flc_desc[0].desc, 1, 0,
				&aeaddata, session->iv.length,
				session->digest_length);
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->aead_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();
	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}
	/* Set IV parameters */
	session->iv.offset = cipher_xform->iv.offset;
	session->iv.length = cipher_xform->iv.length;

	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = auth_xform->digest_length;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
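
	/* Query RTA with both key lengths to decide, per key, between
	 * inline (RTA_DATA_IMM) and by-reference (RTA_DATA_PTR) keys.
	 */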
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);
	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;
	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}
static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -1;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type\n");
		return -EINVAL;
	}

	return 0;
}
static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform,
			    struct rte_cryptodev_sym_session *sess,
			    struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
			    "session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev,
			struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %0x\n",
			     ret);
		return;
	}
}
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/* Close the device at underlying layer */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the allocated memory for the dpseci object */
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}
static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear        = dpaa2_sec_session_clear,
};
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->fle_pool);

	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}
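
/*
 * Probe-time initialisation: open the DPSECI object through the MC bus,
 * read its attributes and create the FLE pool used by the datapath.
 */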
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[20];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/* Open the rte device via MC and save the handle for further use */
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -1;
}
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;
	cryptodev->device->driver = &dpaa2_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
			       cryptodev_driver_id);